| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M |
'''
Hello student. Thank you for downloading a CORGIS library. However, you do not need to open this library. Instead you should use the following:
import suicide_attacks
If you opened the file because you are curious how this library works, then well done! We hope that you find it a useful learning experience. However, you should know that this code is meant to solve somewhat esoteric pedagogical problems, so it is often not best practices.
'''
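# A minimal usage sketch, following the advice in the docstring above (this
# assumes the CORGIS "suicide_attacks" package is on the import path; the field
# names mirror the record structure described in _tifa_definitions() below):
#
#     import suicide_attacks
#     attacks = suicide_attacks.get_attacks()
#     print(len(attacks), "attack records loaded")
#     print(attacks[0]['date'], attacks[0]['statistics']['# killed'])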
import sys as _sys
import os as _os
import json as _json
import sqlite3 as _sql
import difflib as _difflib
def _tifa_definitions():
return {"type": "ModuleType",
"fields": {
'get': {
"type": "FunctionType",
"name": 'get',
"returns": {
"type": "ListType",
"empty": False,
"subtype": {"type": "NumType"}
}
},
'get_attacks': {
"type": "FunctionType",
"name": 'get_attacks',
"returns":
{"type": "ListType", "subtype":
{"type": "DictType", "literals": [{"type": "LiteralStr", "value": 'statistics'}, {"type": "LiteralStr", "value": 'campaign'}, {"type": "LiteralStr", "value": 'attacker'}, {"type": "LiteralStr", "value": 'target'}, {"type": "LiteralStr", "value": 'date'}, {"type": "LiteralStr", "value": 'groups'}], "values": [
{"type": "DictType", "literals": [{"type": "LiteralStr", "value": '# wounded'}, {"type": "LiteralStr", "value": '# killed'}], "values": [
{"type": "NumType"},
{"type": "NumType"}]},
{"type": "DictType", "literals": [{"type": "LiteralStr", "value": 'name'}, {"type": "LiteralStr", "value": 'sides'}], "values": [
{"type": "StrType"},
{"type": "ListType", "subtype":
{"type": "StrType"}}]},
{"type": "DictType", "literals": [{"type": "LiteralStr", "value": 'demographics'}, {"type": "LiteralStr", "value": 'age'}, {"type": "LiteralStr", "value": 'birth'}, {"type": "LiteralStr", "value": 'name'}], "values": [
{"type": "DictType", "literals": [{"type": "LiteralStr", "value": 'religion'}, {"type": "LiteralStr", "value": 'marital'}, {"type": "LiteralStr", "value": 'occupation'}, {"type": "LiteralStr", "value": 'education'}, {"type": "LiteralStr", "value": 'gender'}], "values": [
{"type": "StrType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "StrType"}]},
{"type": "NumType"},
{"type": "DictType", "literals": [{"type": "LiteralStr", "value": 'location'}, {"type": "LiteralStr", "value": 'year'}], "values": [
{"type": "StrType"},
{"type": "NumType"}]},
{"type": "StrType"}]},
{"type": "DictType", "literals": [{"type": "LiteralStr", "value": 'location'}, {"type": "LiteralStr", "value": 'country'}, {"type": "LiteralStr", "value": 'type'}, {"type": "LiteralStr", "value": 'name'}, {"type": "LiteralStr", "value": 'assassination?'}, {"type": "LiteralStr", "value": 'weapon'}], "values": [
{"type": "StrType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "NumType"},
{"type": "StrType"}]},
{"type": "DictType", "literals": [{"type": "LiteralStr", "value": 'year'}, {"type": "LiteralStr", "value": 'day'}, {"type": "LiteralStr", "value": 'month'}], "values": [
{"type": "NumType"},
{"type": "NumType"},
{"type": "NumType"}]},
{"type": "ListType", "subtype":
{"type": "StrType"}}]}},
}
}
}
class _Constants(object):
'''
Global singleton object to hide some of the constants; some IDEs reveal internal module details very aggressively, and there's no other way to hide stuff.
'''
_HEADER = {'User-Agent':
'CORGIS Suicide Attacks library for educational purposes'}
_PYTHON_3 = _sys.version_info >= (3, 0)
_TEST = False
_HARDWARE = 1000
if _Constants._PYTHON_3:
import urllib.request as _request
from urllib.parse import quote_plus as _quote_plus
from urllib.error import HTTPError as _HTTPError
else:
import urllib2 as _urllib2
from urllib import quote_plus as _quote_plus
from urllib2 import HTTPError as _HTTPError
class DatasetException(Exception):
''' Thrown when there is an error loading the dataset for some reason.'''
pass
_Constants._DATABASE_NAME = _os.path.join(_os.path.dirname(__file__),
"suicide_attacks.db")
if not _os.access(_Constants._DATABASE_NAME, _os.F_OK):
raise DatasetException("Error! Could not find a \"{0}\" file. Make sure that there is a \"{0}\" in the same directory as \"{1}.py\"! Spelling is very important here.".format(_Constants._DATABASE_NAME, __name__))
elif not _os.access(_Constants._DATABASE_NAME, _os.R_OK):
raise DatasetException("Error! Could not read the \"{0}\" file. Make sure that it readable by changing its permissions. You may need to get help from your instructor.".format(_Constants._DATABASE_NAME, __name__))
elif not _os.access(_Constants._DATABASE_NAME, _os.W_OK):
# Previously, this generated an error - but that's not important, really.
#_sys.stderr.write('The local cache (\" \") will not be updated. Make sure that it is writable by changing its permissions. You may need to get help from your instructor.\n'.format(_Constants._DATABASE_NAME))
#_sys.stderr.flush()
pass
_Constants._DATABASE = _sql.connect(_Constants._DATABASE_NAME)
class _Auxiliary(object):
@staticmethod
def _parse_type(value, type_func):
"""
Attempt to cast *value* into *type_func*, returning *default* if it fails.
"""
default = type_func(0)
if value is None:
return default
try:
return type_func(value)
except ValueError:
return default
@staticmethod
def _byteify(input):
"""
Force the given input to only use `str` instead of `bytes` or `unicode`.
        This works even if the input is a dict, list, or a nested combination of them.
"""
if isinstance(input, dict):
return {_Auxiliary._byteify(key): _Auxiliary._byteify(value) for key, value in input.items()}
elif isinstance(input, list):
return [_Auxiliary._byteify(element) for element in input]
elif _Constants._PYTHON_3 and isinstance(input, str):
return str(input.encode('ascii', 'replace').decode('ascii'))
elif not _Constants._PYTHON_3 and isinstance(input, unicode):
return str(input.encode('ascii', 'replace').decode('ascii'))
else:
return input
@staticmethod
def _guess_schema(input):
if isinstance(input, dict):
return {str(key.encode('ascii', 'replace').decode('ascii')):
_Auxiliary._guess_schema(value) for key, value in input.items()}
elif isinstance(input, list):
return [_Auxiliary._guess_schema(input[0])] if input else []
else:
return type(input)
################################################################################
# Domain Objects
################################################################################
################################################################################
# Interfaces
################################################################################
def get_attacks(test=False):
"""
Returns a list of the attacks in the database.
"""
if _Constants._TEST or test:
rows = _Constants._DATABASE.execute("SELECT data FROM suicide_attacks LIMIT {hardware}".format(
hardware=_Constants._HARDWARE))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
else:
rows = _Constants._DATABASE.execute("SELECT data FROM suicide_attacks".format(
hardware=_Constants._HARDWARE))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
################################################################################
# Internalized testing code
################################################################################
def _test_interfaces():
from pprint import pprint as _pprint
from timeit import default_timer as _default_timer
# Production test
print("Production get_attacks")
start_time = _default_timer()
result = get_attacks()
print("{} entries found.".format(len(result)))
_pprint(_Auxiliary._guess_schema(result))
print("Time taken: {}".format(_default_timer() - start_time))
# Test test
print("Test get_attacks")
start_time = _default_timer()
result = get_attacks(test=True)
print("{} entries found.".format(len(result)))
_pprint(_Auxiliary._guess_schema(result))
print("Time taken: {}".format(_default_timer() - start_time))
if __name__ == '__main__':
from optparse import OptionParser as _OptionParser
_parser = _OptionParser()
_parser.add_option("-t", "--test", action="store_true",
default=False,
help="Execute the interfaces to test them.")
(_options, _args) = _parser.parse_args()
if _options.test:
_test_interfaces() | RealTimeWeb/datasets | datasets/python/suicide_attacks/suicide_attacks.py | Python | gpl-2.0 | 9,512 |
class Instance():
    def __init__(self, conn):
        self.conn = conn
    def prepare(self):
        pass
    def put_config(self):
        pass
    def status(self):
        pass
    def terminate(self):
        pass
    def is_master(self):
        pass
def run_slave(instance):
    pass
def run_master(instance):
    pass
class InstanceManager():
    def __init__(self, conn):
        self.conn = conn
    def create(self):
        pass
| kamil/locust_cloud | instance.py | Python | mit | 457 |
import json
# Transliteration map from Cyrillic to Latin script
with open('milanbot/transliteration.json') as json_file:
cyrillic_transliteration = json.load(json_file)
# Supported languages that 'MilanBot' works with
with open('milanbot/languages.json') as json_file:
languages = json.load(json_file)
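# Illustrative sketch (an assumption about the data, not taken from the original
# bot): if the JSON above maps single Cyrillic characters to Latin strings, the
# transliteration table can be applied like this.
def _transliterate(text):
    """Transliterate Cyrillic characters in text using the loaded mapping."""
    return ''.join(cyrillic_transliteration.get(character, character)
                   for character in text)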
sparql_disambiguation = \
'SELECT ?item WHERE {?item wdt:P31 wd:Q4167410 }'
sparql_disambiguation_sr = \
'SELECT ?item WHERE { ?item wdt:P31 wd:Q4167410 . ' \
'?wiki0 schema:about ?item . ' \
'?wiki0 schema:isPartOf <https://sh.wikipedia.org/> }'
sparql_people = \
'SELECT ?item WHERE { ?item wdt:P31 wd:Q5 . ' \
'?wiki0 schema:about ?item . ' \
'?wiki0 schema:isPartOf <https://sr.wikipedia.org/> }'
| milanjelisavcic/milanbot | milanbot/__init__.py | Python | unlicense | 743 |
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
class AttachmentsWidgetBase(ForeignKeyRawIdWidget):
template_name = None
def render(self, name, value, attrs=None):
hidden_input = super(ForeignKeyRawIdWidget, self).render(
name, value, attrs)
css_id = attrs.get('id', '')
lookup_url = reverse('admin:%s_%s_changelist' % (
self.rel.to._meta.app_label, self.rel.to._meta.model_name))
obj = self.obj_for_value(value)
context = {
'hidden_input': hidden_input,
'id': css_id,
'lookup_name': name,
'lookup_url': lookup_url,
'obj': obj
}
html = render_to_string(self.template_name, context)
return mark_safe(html)
def obj_for_value(self, value):
try:
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
        except Exception:
obj = None
return obj
class Media:
css = {
'all': ('attachments/css/widget.css',)
}
class ImageWidget(AttachmentsWidgetBase):
template_name = 'attachments/image/widget.html'
class DocumentWidget(AttachmentsWidgetBase):
template_name = 'attachments/document/widget.html' | zdot/django-attachments | attachments/widgets.py | Python | bsd-3-clause | 1,476 |
#! /usr/bin/python
import compizconfig
from gi.repository import Gdk
screen = Gdk.Screen.get_default()
n = screen.get_number()
context = compizconfig.Context(n)
print(context.CurrentProfile.Name)
| jokerdino/unity-tweak-tool | notes/wizardry.py | Python | gpl-3.0 | 196 |
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Dummy easyblock for OpenBLAS
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.framework.easyblock import EasyBlock
class EB_OpenBLAS(EasyBlock):
pass
| hpcugent/easybuild-framework | test/framework/sandbox/easybuild/easyblocks/o/openblas.py | Python | gpl-2.0 | 1,222 |
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test maasserver nodes views."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = []
from django.core.urlresolvers import reverse
from lxml.etree import XPath
from lxml.html import fromstring
from maasserver.testing import get_content_links
from maasserver.testing.factory import factory
from maasserver.testing.testcase import LoggedInTestCase
from maasserver.views import tags as tags_views
from maastesting.matchers import ContainsAll
class TagViewsTest(LoggedInTestCase):
def test_view_tag_displays_tag_info(self):
# The tag page features the basic information about the tag.
tag = factory.make_tag(name='the-named-tag',
comment='Human description of the tag',
definition='//xpath')
tag_link = reverse('tag-view', args=[tag.name])
response = self.client.get(tag_link)
doc = fromstring(response.content)
content_text = doc.cssselect('#content')[0].text_content()
self.assertThat(
content_text, ContainsAll([tag.comment, tag.definition]))
def test_view_tag_includes_node_links(self):
tag = factory.make_tag()
node = factory.make_node()
node.tags.add(tag)
mac = factory.make_mac_address(node=node).mac_address
tag_link = reverse('tag-view', args=[tag.name])
node_link = reverse('node-view', args=[node.system_id])
response = self.client.get(tag_link)
doc = fromstring(response.content)
content_text = doc.cssselect('#content')[0].text_content()
self.assertThat(
content_text, ContainsAll([unicode(mac), '%s' % node.hostname]))
self.assertNotIn(node.system_id, content_text)
self.assertIn(node_link, get_content_links(response))
def test_view_tag_num_queries_is_independent_of_num_nodes(self):
tag = factory.make_tag()
tag_link = reverse('tag-view', args=[tag.name])
nodegroup = factory.make_node_group()
nodes = [factory.make_node(nodegroup=nodegroup, mac=True)
for i in range(20)]
for node in nodes[:10]:
node.tags.add(tag)
num_queries, response = self.getNumQueries(self.client.get, tag_link)
self.assertEqual(
10,
len([link for link in get_content_links(response)
if link.startswith('/nodes/node')]))
# Need to get the tag, and the nodes, and the macs of the nodes
self.assertTrue(num_queries > 3)
for node in nodes[10:]:
node.tags.add(tag)
num_bonus_queries, response = self.getNumQueries(
self.client.get, tag_link)
self.assertEqual(num_queries, num_bonus_queries)
self.assertEqual(
20,
len([link for link in get_content_links(response)
if link.startswith('/nodes/node')]))
def test_view_tag_hides_private_nodes(self):
tag = factory.make_tag()
node = factory.make_node()
node2 = factory.make_node(owner=factory.make_user())
node.tags.add(tag)
node2.tags.add(tag)
tag_link = reverse('tag-view', args=[tag.name])
response = self.client.get(tag_link)
doc = fromstring(response.content)
content_text = doc.cssselect('#content')[0].text_content()
self.assertIn(node.hostname, content_text)
self.assertNotIn(node2.hostname, content_text)
def test_view_tag_shows_kernel_params(self):
tag = factory.make_tag(kernel_opts='--test tag params')
node = factory.make_node()
node.tags = [tag]
tag_link = reverse('tag-view', args=[tag.name])
response = self.client.get(tag_link)
doc = fromstring(response.content)
kernel_opts = doc.cssselect('.kernel-opts-tag')[0].text_content()
self.assertIn('Kernel Parameters', kernel_opts)
self.assertIn(tag.kernel_opts, kernel_opts)
def test_view_tag_paginates_nodes(self):
"""Listing of nodes with tag is split across multiple pages
Copy-coded from NodeViewsTest.test_node_list_paginates evilly.
"""
# Set a very small page size to save creating lots of nodes
page_size = 2
self.patch(tags_views.TagView, 'paginate_by', page_size)
tag = factory.make_tag()
nodes = [
factory.make_node(created="2012-10-12 12:00:%02d" % i)
for i in range(page_size * 2 + 1)
]
for node in nodes:
node.tags = [tag]
# Order node links with newest first as the view is expected to
node_links = [
reverse('node-view', args=[node.system_id])
for node in reversed(nodes)
]
expr_node_links = XPath("//div[@id='nodes']/table//a/@href")
expr_page_anchors = XPath("//div[@class='pagination']//a")
# Fetch first page, should link newest two nodes and page 2
response = self.client.get(reverse('tag-view', args=[tag.name]))
page1 = fromstring(response.content)
self.assertEqual(node_links[:page_size], expr_node_links(page1))
self.assertEqual(
[("next", "?page=2"), ("last", "?page=3")],
[(a.text.lower(), a.get("href"))
for a in expr_page_anchors(page1)])
# Fetch second page, should link next nodes and adjacent pages
response = self.client.get(
reverse('tag-view', args=[tag.name]), {"page": 2})
page2 = fromstring(response.content)
self.assertEqual(
node_links[page_size:page_size * 2],
expr_node_links(page2))
self.assertEqual(
[("first", "."), ("previous", "."),
("next", "?page=3"), ("last", "?page=3")],
[(a.text.lower(), a.get("href"))
for a in expr_page_anchors(page2)])
# Fetch third page, should link oldest node and node list page
response = self.client.get(
reverse('tag-view', args=[tag.name]), {"page": 3})
page3 = fromstring(response.content)
self.assertEqual(node_links[page_size * 2:], expr_node_links(page3))
self.assertEqual(
[("first", "."), ("previous", "?page=2")],
[(a.text.lower(), a.get("href"))
for a in expr_page_anchors(page3)])
| cloudbase/maas | src/maasserver/tests/test_views_tags.py | Python | agpl-3.0 | 6,530 |
# Github: https://github.com/minfun/leetcode
# Email: [email protected]
# Link: https://leetcode.com/problems/add-two-numbers/#/description
import unittest
from leetcode.no2_add_two_numvers import Solution
from leetcode.no2_add_two_numvers import LinkedList
class Test(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def test_add_two_numbers(self):
l1 = LinkedList()
l2 = LinkedList()
l3 = LinkedList()
l1.add_node(2)
l1.add_node(4)
l1.add_node(3)
l2.add_node(5)
l2.add_node(6)
l2.add_node(4)
l3.add_node(7)
l3.add_node(0)
l3.add_node(8)
added = self.solution.addTwoNumbers(l1, l2).link_as_list()
self.assertEqual(added, l3.link_as_list())
| minfun/leetcode | tests/test_no2_add_two_numbers.py | Python | mit | 787 |
from tastypie import fields
from tastypie.resources import ModelResource
from plank.models import Service, Category, Status, Event
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import DjangoAuthorization
# Authentication class noted from http://stackoverflow.com/a/12273403
class SimpleAuthentication(BasicAuthentication):
'''
Authenticates everyone if the request is GET otherwise performs
BasicAuthentication.
'''
def is_authenticated(self, request, **kwargs):
if request.method == 'GET':
return True
return super(SimpleAuthentication, self).is_authenticated(request,
**kwargs)
class CategoryResource(ModelResource):
class Meta:
queryset = Category.objects.all()
resource_name = 'categories'
excludes = ['id']
authentication = SimpleAuthentication()
authorization = DjangoAuthorization()
filtering = {
"name": ALL,
}
class ServiceResource(ModelResource):
category = fields.ForeignKey(CategoryResource, 'category', full=True)
class Meta:
queryset = Service.objects.all()
resource_name = 'services'
excludes = ['id']
authentication = SimpleAuthentication()
authorization = DjangoAuthorization()
filtering = {
"name": ALL,
}
def dehydrate(self, bundle):
# showing latest event for the category
bundle.data['current-event'] = bundle.obj.current_event()
return bundle
class StatusResource(ModelResource):
class Meta:
queryset = Status.objects.all()
resource_name = 'statuses'
excludes = ['id']
authentication = SimpleAuthentication()
authorization = DjangoAuthorization()
filtering = {
"name": ALL,
}
class EventsResource(ModelResource):
service = fields.ForeignKey(ServiceResource, 'service')
status = fields.ForeignKey(StatusResource, 'status', full=True)
class Meta:
queryset = Event.objects.all()
resource_name = 'events'
excludes = ['id']
authentication = SimpleAuthentication()
authorization = DjangoAuthorization()
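# Sketch of how these resources are typically wired up with tastypie's Api
# (the API name and URL prefix below are assumptions, not taken from this repo):
#
#     from tastypie.api import Api
#     v1_api = Api(api_name='v1')
#     for resource in (CategoryResource, ServiceResource, StatusResource, EventsResource):
#         v1_api.register(resource())
#     # then in urls.py: url(r'^api/', include(v1_api.urls))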
| sijis/django-plank | plank/api.py | Python | mit | 2,341 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-28 09:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0010_auto_20170928_0610'),
]
operations = [
migrations.CreateModel(
name='Promotion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.CharField(max_length=150)),
('discount', models.DecimalField(decimal_places=2, default=0.0, max_digits=8)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.RemoveField(
model_name='pattern',
name='material',
),
migrations.RemoveField(
model_name='image',
name='product',
),
migrations.RemoveField(
model_name='product',
name='color',
),
migrations.AddField(
model_name='design',
name='material',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='catalog.Material'),
preserve_default=False,
),
migrations.AddField(
model_name='image',
name='design',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='catalog.Design'),
preserve_default=False,
),
migrations.AddField(
model_name='material',
name='color',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='material',
name='description',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='material',
name='image_name',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
migrations.AlterField(
model_name='image',
name='file_name',
field=models.CharField(max_length=100),
),
migrations.DeleteModel(
name='Pattern',
),
migrations.AddField(
model_name='promotion',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='catalog.Product'),
),
]
| PairMhai/Backend | catalog/migrations/0011_auto_20170928_0924.py | Python | agpl-3.0 | 2,785 |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, IntegerField, DictField, DateTimeField
TYPE_MANAGED = 'type_managed' # identifies UOW created by Abstract State Machine child for Managed Process
TYPE_FREERUN = 'type_freerun'  # identifies UOW created by FreerunStateMachine for ad-hoc processing
# UOW was successfully processed by the worker
STATE_PROCESSED = 'state_processed'
# UOW was received by the worker and it started the processing
STATE_IN_PROGRESS = 'state_in_progress'
# UOW was instantiated and send to the worker
STATE_REQUESTED = 'state_requested'
# Job has been manually marked as SKIPPED via MX
# and so the associated UOW got cancelled
# or the life-support threshold has been crossed for failing UOW
STATE_CANCELED = 'state_canceled'
# UOW can get into STATE_INVALID if:
# a. related Job was marked for reprocessing via MX
# b. have failed with an exception at the worker level
# NOTICE: GarbageCollector changes STATE_INVALID -> STATE_REQUESTED during re-posting
STATE_INVALID = 'state_invalid'
# UOW was received by a worker,
# but no data was found to process
STATE_NOOP = 'state_noop'
class UnitOfWork(BaseDocument):
""" Module represents persistent Model for atomic unit of work performed by the system.
UnitOfWork Instances are stored in the <unit_of_work> collection """
db_id = ObjectIdField(name='_id', null=True)
process_name = StringField()
timeperiod = StringField(null=True)
start_timeperiod = StringField(null=True) # [synergy date] lower boundary of the period that needs to be processed
end_timeperiod = StringField(null=True) # [synergy date] upper boundary of the period that needs to be processed
start_id = ObjectIdField(name='start_obj_id') # [DB _id] lower boundary of the period that needs to be processed
end_id = ObjectIdField(name='end_obj_id') # [DB _id] upper boundary of the period that needs to be processed
source = StringField(null=True) # defines source of data for the computation
sink = StringField(null=True) # defines sink where the aggregated data will be saved
arguments = DictField() # task-level arguments that could supplement or override process-level ones
state = StringField(choices=[STATE_INVALID, STATE_REQUESTED, STATE_IN_PROGRESS,
STATE_PROCESSED, STATE_CANCELED, STATE_NOOP])
created_at = DateTimeField()
submitted_at = DateTimeField()
started_at = DateTimeField()
finished_at = DateTimeField()
number_of_aggregated_documents = IntegerField()
number_of_processed_documents = IntegerField()
number_of_retries = IntegerField(default=0)
unit_of_work_type = StringField(choices=[TYPE_MANAGED, TYPE_FREERUN])
@classmethod
def key_fields(cls):
return (cls.process_name.name,
cls.timeperiod.name,
cls.start_id.name,
cls.end_id.name)
@property
def is_active(self):
return self.state in [STATE_REQUESTED, STATE_IN_PROGRESS, STATE_INVALID]
@property
def is_finished(self):
return self.state in [STATE_PROCESSED, STATE_CANCELED, STATE_NOOP]
@property
def is_processed(self):
return self.state == STATE_PROCESSED
@property
def is_noop(self):
return self.state == STATE_NOOP
@property
def is_canceled(self):
return self.state == STATE_CANCELED
@property
def is_invalid(self):
return self.state == STATE_INVALID
@property
def is_requested(self):
return self.state == STATE_REQUESTED
@property
def is_in_progress(self):
return self.state == STATE_IN_PROGRESS
PROCESS_NAME = UnitOfWork.process_name.name
TIMEPERIOD = UnitOfWork.timeperiod.name
START_TIMEPERIOD = UnitOfWork.start_timeperiod.name
END_TIMEPERIOD = UnitOfWork.end_timeperiod.name
START_ID = UnitOfWork.start_id.name
END_ID = UnitOfWork.end_id.name
STATE = UnitOfWork.state.name
UNIT_OF_WORK_TYPE = UnitOfWork.unit_of_work_type.name
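# Typical lifecycle sketch, expressed with the states and helper properties
# defined above (construction and attribute assignment details depend on the
# odm BaseDocument API, so treat this as illustrative only):
#
#     uow = UnitOfWork()
#     uow.process_name = 'example_process'    # hypothetical process name
#     uow.unit_of_work_type = TYPE_MANAGED
#     uow.state = STATE_REQUESTED             # posted for a worker to pick up
#     assert uow.is_requested and uow.is_active
#     uow.state = STATE_IN_PROGRESS           # worker started processing
#     uow.state = STATE_PROCESSED             # worker finished successfully
#     assert uow.is_processed and uow.is_finished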
| mushkevych/scheduler | synergy/db/model/unit_of_work.py | Python | bsd-3-clause | 4,120 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy
import sys
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
"""DCASE 2016 scene classification metrics
Examples
--------
>>> dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> y_true = []
>>> y_pred = []
>>> for result in results:
>>> y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
>>> y_pred.append(result[1])
>>>
>>> dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
>>>
>>> results = dcase2016_scene_metric.results()
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
Evaluated scene labels in the list
"""
self.accuracies_per_class = None
self.Nsys = None
self.Nref = None
self.class_list = class_list
self.eps = numpy.spacing(1)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.results()
def accuracies(self, y_true, y_pred, labels):
"""Calculate accuracy
Parameters
----------
y_true : numpy.array
Ground truth array, list of scene labels
y_pred : numpy.array
System output array, list of scene labels
labels : list
list of scene labels
Returns
-------
array : numpy.array [shape=(number of scene labels,)]
Accuracy per scene label class
"""
confusion_matrix = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels).astype(float)
return numpy.divide(numpy.diag(confusion_matrix), numpy.sum(confusion_matrix, 1) + self.eps)
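    # Worked example for accuracies(): with labels=['a', 'b'],
    # y_true=['a', 'a', 'b'] and y_pred=['a', 'b', 'b'], the confusion matrix is
    # [[1, 1], [0, 1]] (rows = reference, columns = prediction), so the returned
    # per-class accuracies (the diagonal divided by the row sums, i.e. class-wise
    # recall) are approximately [0.5, 1.0].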
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
accuracies_per_class = self.accuracies(y_pred=system_output, y_true=annotated_ground_truth,
labels=self.class_list)
if self.accuracies_per_class is None:
self.accuracies_per_class = accuracies_per_class
else:
self.accuracies_per_class = numpy.vstack((self.accuracies_per_class, accuracies_per_class))
Nref = numpy.zeros(len(self.class_list))
Nsys = numpy.zeros(len(self.class_list))
for class_id, class_label in enumerate(self.class_list):
for item in system_output:
if item == class_label:
Nsys[class_id] += 1
for item in annotated_ground_truth:
if item == class_label:
Nref[class_id] += 1
if self.Nref is None:
self.Nref = Nref
else:
self.Nref = numpy.vstack((self.Nref, Nref))
if self.Nsys is None:
self.Nsys = Nsys
else:
self.Nsys = numpy.vstack((self.Nsys, Nsys))
def results(self):
"""Get results
Outputs results in dict, format:
{
'class_wise_data':
{
'office': {
'Nsys': 10,
'Nref': 7,
},
}
'class_wise_accuracy':
{
'office': 0.6,
'home': 0.4,
}
'overall_accuracy': numpy.mean(self.accuracies_per_class)
'Nsys': 100,
'Nref': 100,
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'class_wise_data': {},
'class_wise_accuracy': {},
'overall_accuracy': numpy.mean(self.accuracies_per_class)
}
if len(self.Nsys.shape) == 2:
results['Nsys'] = int(sum(sum(self.Nsys)))
results['Nref'] = int(sum(sum(self.Nref)))
else:
results['Nsys'] = int(sum(self.Nsys))
results['Nref'] = int(sum(self.Nref))
for class_id, class_label in enumerate(self.class_list):
if len(self.accuracies_per_class.shape) == 2:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[:, class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(sum(self.Nsys[:, class_id])),
'Nref': int(sum(self.Nref[:, class_id])),
}
else:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(self.Nsys[class_id]),
'Nref': int(self.Nref[class_id]),
}
return results
class EventDetectionMetrics(object):
"""Baseclass for sound event metric classes.
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
"""
self.class_list = class_list
self.eps = numpy.spacing(1)
def max_event_offset(self, data):
"""Get maximum event offset from event list
Parameters
----------
data : list
Event list, list of event dicts
Returns
-------
max : float > 0
Maximum event offset
"""
max = 0
for event in data:
if event['event_offset'] > max:
max = event['event_offset']
return max
def list_to_roll(self, data, time_resolution=0.01):
"""Convert event list into event roll.
        Event roll is a binary matrix indicating event activity within time segments defined by time_resolution.
Parameters
----------
data : list
Event list, list of event dicts
time_resolution : float > 0
Time resolution used when converting event into event roll.
Returns
-------
event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, amount of classes)]
Event roll
"""
        # Initialize
        data_length = self.max_event_offset(data)
        event_roll = numpy.zeros((int(math.ceil(data_length * 1 / time_resolution)) + 1, len(self.class_list)))
        # Fill-in event_roll
        for event in data:
            pos = self.class_list.index(event['event_label'].rstrip())
            onset = int(math.floor(event['event_onset'] * 1 / time_resolution))
            offset = int(math.ceil(event['event_offset'] * 1 / time_resolution)) + 1
            event_roll[onset:offset, pos] = 1
        return event_roll
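    # Worked example for list_to_roll(): with time_resolution=1.0, an event with
    # event_onset=0.2 and event_offset=2.4 yields onset index floor(0.2) = 0 and
    # offset index ceil(2.4) + 1 = 4, so rows 0 through 3 of that event's column
    # in the event roll are set to 1.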
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Segment based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
"""
self.time_resolution = time_resolution
self.overall = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
'ER': 0.0,
'S': 0.0,
'D': 0.0,
'I': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=self.time_resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=self.time_resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute segment-based overall metrics
for segment_id in range(0, annotated_event_roll.shape[0]):
annotated_segment = annotated_event_roll[segment_id, :]
system_segment = system_event_roll[segment_id, :]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
S = min(Nref, Nsys) - Ntp
D = max(0, Nref - Nsys)
I = max(0, Nsys - Nref)
ER = max(Nref, Nsys) - Ntp
self.overall['Ntp'] += Ntp
self.overall['Ntn'] += Ntn
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['S'] += S
self.overall['D'] += D
self.overall['I'] += I
self.overall['ER'] += ER
for class_id, class_label in enumerate(self.class_list):
annotated_segment = annotated_event_roll[:, class_id]
system_segment = system_event_roll[:, class_id]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Ntn'] += Ntn
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
return self
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = self.overall['ER'] / self.overall['Nref']
results['overall']['S'] = self.overall['S'] / self.overall['Nref']
results['overall']['D'] = self.overall['D'] / self.overall['Nref']
results['overall']['I'] = self.overall['I'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_id, class_label in enumerate(self.class_list):
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Event based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
t_collar : float > 0
Time collar for event onset and offset condition
(Default value = 0.2)
"""
self.time_resolution = time_resolution
self.t_collar = t_collar
self.overall = {
'Nref': 0.0,
'Nsys': 0.0,
'Nsubs': 0.0,
'Ntp': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Nref': 0.0,
'Nsys': 0.0,
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Overall metrics
# Total number of detected and reference events
Nsys = len(system_output)
Nref = len(annotated_ground_truth)
sys_correct = numpy.zeros(Nsys, dtype=bool)
ref_correct = numpy.zeros(Nref, dtype=bool)
# Number of correctly transcribed events, onset/offset within a t_collar range
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if label_condition and onset_condition and offset_condition:
ref_correct[j] = True
sys_correct[i] = True
break
Ntp = numpy.sum(sys_correct)
        sys_leftover = numpy.nonzero(numpy.logical_not(sys_correct))[0]
        ref_leftover = numpy.nonzero(numpy.logical_not(ref_correct))[0]
# Substitutions
Nsubs = 0
for j in ref_leftover:
for i in sys_leftover:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Nsubs += 1
break
Nfp = Nsys - Ntp - Nsubs
Nfn = Nref - Ntp - Nsubs
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['Ntp'] += Ntp
self.overall['Nsubs'] += Nsubs
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
# Class-wise metrics
for class_id, class_label in enumerate(self.class_list):
Nref = 0.0
Nsys = 0.0
Ntp = 0.0
# Count event frequencies in the ground truth
for i in range(0, len(annotated_ground_truth)):
if annotated_ground_truth[i]['event_label'] == class_label:
Nref += 1
# Count event frequencies in the system output
for i in range(0, len(system_output)):
if system_output[i]['event_label'] == class_label:
Nsys += 1
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == class_label and system_output[i][
'event_label'] == class_label:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Ntp += 1
break
Nfp = Nsys - Ntp
Nfn = Nref - Ntp
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
def onset_condition(self, annotated_event, system_event, t_collar=0.200):
"""Onset condition, checked does the event pair fulfill condition
Condition:
- event onsets are within t_collar each other
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
Defines how close event onsets have to be in order to be considered match. In seconds.
(Default value = 0.2)
Returns
-------
result : bool
Condition result
"""
return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar
def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
"""Offset condition, checking does the event pair fulfill condition
Condition:
- event offsets are within t_collar each other
or
- system event offset is within the percentage_of_length*annotated event_length
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
Defines how close event onsets have to be in order to be considered match. In seconds.
(Default value = 0.2)
percentage_of_length : float [0-1]
Returns
-------
result : bool
Condition result
"""
annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar,
percentage_of_length * annotated_length)
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall[
'Nref']
results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_label in self.class_list:
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
# Class-wise average
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
"""Lecagy DCASE2013 metrics, converted from the provided Matlab implementation
Supported metrics:
- Frame based
- F-score (F)
- AEER
- Event based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
- Class based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
"""
#
def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute frame-based metrics
Nref = sum(sum(annotated_event_roll))
Ntot = sum(sum(system_event_roll))
Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
Nsubs = min(Nfp, Nfn)
eps = numpy.spacing(1)
results = dict()
results['Rec'] = Ntp / (Nref + eps)
results['Pre'] = Ntp / (Ntot + eps)
results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
return results
def event_based(self, annotated_ground_truth, system_output):
# Event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events
Ntot = len(system_output)
Nref = len(annotated_ground_truth)
# Number of correctly transcribed events, onset within a +/-100 ms range
Ncorr = 0
NcorrOff = 0
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
Ncorr += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
NcorrOff += 1
break # In order to not evaluate duplicates
# Compute onset-only event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = min(Nfp, Nfn)
results['onset']['Rec'] = Ncorr / (Nref + eps)
results['onset']['Pre'] = Ncorr / (Ntot + eps)
results['onset']['F'] = 2 * (
(results['onset']['Pre'] * results['onset']['Rec']) / (
results['onset']['Pre'] + results['onset']['Rec'] + eps))
results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
# Compute onset-offset event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = min(NfpOff, NfnOff)
results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)
return results
def class_based(self, annotated_ground_truth, system_output):
# Class-wise event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events per class
Ntot = numpy.zeros((len(self.class_list), 1))
for event in system_output:
pos = self.class_list.index(event['event_label'])
Ntot[pos] += 1
Nref = numpy.zeros((len(self.class_list), 1))
for event in annotated_ground_truth:
pos = self.class_list.index(event['event_label'])
Nref[pos] += 1
I = (Nref > 0).nonzero()[0] # index for classes present in ground-truth
# Number of correctly transcribed events per class, onset within a +/-100 ms range
Ncorr = numpy.zeros((len(self.class_list), 1))
NcorrOff = numpy.zeros((len(self.class_list), 1))
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(
annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
pos = self.class_list.index(system_output[i]['event_label'])
Ncorr[pos] += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
'event_onset'])):
pos = self.class_list.index(system_output[i]['event_label'])
NcorrOff[pos] += 1
break # In order to not evaluate duplicates
# Compute onset-only class-wise event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = numpy.minimum(Nfp, Nfn)
tempRec = Ncorr[I] / (Nref[I] + eps)
tempPre = Ncorr[I] / (Ntot[I] + eps)
results['onset']['Rec'] = numpy.mean(tempRec)
results['onset']['Pre'] = numpy.mean(tempPre)
tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
results['onset']['F'] = numpy.mean(tempF)
tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
results['onset']['AEER'] = numpy.mean(tempAEER)
# Compute onset-offset class-wise event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = numpy.minimum(NfpOff, NfnOff)
tempRecOff = NcorrOff[I] / (Nref[I] + eps)
tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
results['onset-offset']['F'] = numpy.mean(tempFOff)
tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)
return results
def main(argv):
# Examples to show usage and required data structures
class_list = ['class1', 'class2', 'class3']
system_output = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.1,
'event_offset': 4.7
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
annotated_groundtruth = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.2,
'event_offset': 5.4
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)
print 'DCASE2013'
print 'Frame-based:', dcase2013metric.frame_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Event-based:', dcase2013metric.event_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Class-based:', dcase2013metric.class_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)
print 'DCASE2016'
print dcase2016_metric.evaluate(system_output=system_output, annotated_ground_truth=annotated_groundtruth).results()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps9/src/evaluation.py | Python | mit | 43,426 |
"""
# Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
"""
from __future__ import absolute_import
from ..msg.Field import *
from ..msg.ImportExportHelper import *
from ..msg.StructValue import *
from ..msg.Type import *
from ..msg.ValueFactory import *
from ..support.Class2TypeMap import *
from ..support.Validator_string import *
from ...util.URL import *
class URLSerializer(ImportExportHelper):
"""
An etch serializer for URL
"""
FIELD_NAME = "urlStr"
@classmethod
def init(cls, typ, class2type):
"""
Defines custom fields in the value factory so that the importer can find them.
@param typ
@param class2type
"""
field = typ.getField(cls.FIELD_NAME)
class2type.put(URL, typ)
typ.setComponentType(URL)
typ.setImportExportHelper( URLSerializer(typ, field))
typ.putValidator( field, Validator_string.get(0))
typ.lock()
def __init__(self, typ, field):
self.__type = typ
self.__field = field
def importValue(self, struct):
        return URL(struct.get(self.__field))
def exportValue(self, vf, value):
struct = StructValue(self.__type, vf)
struct.put(self.__field, repr(value))
return struct
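# Hedged usage sketch (added for illustration; assumes a type `typ` and value
# factory `vf` already configured via URLSerializer.init()):
#
#   ser = URLSerializer(typ, typ.getField(URLSerializer.FIELD_NAME))
#   struct = ser.exportValue(vf, URL('http://www.example.com/'))
#   url = ser.importValue(struct)   # -> URL rebuilt from its string representation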
| OBIGOGIT/etch | binding-python/runtime/src/main/python/etch/binding/util/URLSerializer.py | Python | apache-2.0 | 2,249 |
import pylab as plt
mySamples = []
myLinear = []
myQuadratic = []
myCubic = []
myExponential = []
for i in range(30):
mySamples.append(i)
myLinear.append(i)
myQuadratic.append(i**2)
myCubic.append(i**3)
myExponential.append(1.5**i)
plt.figure('lin')
plt.clf()
plt.ylim(0, 1000)
plt.plot(mySamples, myLinear)
plt.figure('quad')
plt.clf()
plt.ylim(0, 1000)
plt.plot(mySamples, myQuadratic)
plt.figure('cube')
plt.clf()
plt.plot(mySamples, myCubic)
plt.figure('expo')
plt.clf()
plt.plot(mySamples, myExponential)
plt.figure('lin quad')
plt.clf()
plt.subplot(211)
plt.ylim(0, 900)
plt.plot(mySamples, myLinear, 'b-', label='linear')
plt.legend(loc='upper left')
plt.title('Linear vs. Quadratic')
plt.subplot(212)
plt.ylim(0, 900)
plt.plot(mySamples, myQuadratic, 'ro', label='quadratic')
plt.legend(loc='upper left')
plt.figure('cube exp')
plt.clf()
plt.plot(mySamples, myCubic, 'g^', label='cubic')
plt.plot(mySamples, myExponential, 'r--', label='exponential')
plt.legend()
plt.title('Cubic vs. Exponential')
plt.figure('cube exp log')
plt.clf()
plt.plot(mySamples, myCubic, 'g^', label='cubic')
plt.plot(mySamples, myExponential, 'r--', label='exponential')
plt.yscale('log')
plt.legend()
plt.title('Cubic vs. Exponential')
plt.figure('lin')
plt.title('Linear')
plt.xlabel('sample points')
plt.ylabel('linear function')
plt.figure('quad')
plt.title('Quadratic')
plt.xlabel('sample points')
plt.ylabel('quadratic function')
plt.figure('cube')
plt.title('Cubic')
plt.xlabel('sample points')
plt.ylabel('cubic function')
plt.figure('expo')
plt.title('Exponential')
plt.xlabel('sample points')
plt.ylabel('exponential function')
| Mdlkxzmcp/various_python | Alpha & Beta/Mathplotnumpypandas/MIT6001.py | Python | mit | 1,655 |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__all__ = [
'build',
    'build_from_document',
'fix_method_name',
'key2param'
]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownFileType
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import MediaModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.anyjson import simplejson
logger = logging.getLogger(__name__)
URITEMPLATE = re.compile('{[^}]*}')
VARNAME = re.compile('[a-zA-Z0-9_-]+')
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
'{api}/{apiVersion}/rest')
DEFAULT_METHOD_DOC = 'A description of how to use this function'
# Parameters accepted by the stack, but not visible via discovery.
STACK_QUERY_PARAMETERS = ['trace', 'pp', 'userip', 'strict']
# Python reserved words.
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
'pass', 'print', 'raise', 'return', 'try', 'while' ]
def fix_method_name(name):
"""Fix method names to avoid reserved word conflicts.
Args:
name: string, method name.
Returns:
The name with a '_' prefixed if the name is a reserved word.
"""
if name in RESERVED_WORDS:
return name + '_'
else:
return name
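# Illustrative sketch (added, not part of the original module): fix_method_name
# only renames methods that collide with Python reserved words.
#
#   fix_method_name('import')  # -> 'import_'
#   fix_method_name('list')    # -> 'list' ('list' is a builtin, not a keyword)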
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
Updated query parameter. Does not update the url if value is None.
"""
if value is None:
return url
else:
parsed = list(urlparse.urlparse(url))
q = dict(parse_qsl(parsed[4]))
q[name] = value
parsed[4] = urllib.urlencode(q)
return urlparse.urlunparse(parsed)
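# Illustrative sketch (added): urlencode() does not guarantee parameter order,
# so only the parsed result is stable.
#
#   _add_query_parameter('http://example.com/path?a=1', 'userIp', '1.2.3.4')
#   # -> 'http://example.com/path?a=1&userIp=1.2.3.4' (parameters may be reordered)
#   _add_query_parameter('http://example.com/path', 'userIp', None)
#   # -> 'http://example.com/path' (unchanged, because the value is None)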
def key2param(key):
"""Converts key names into parameter names.
For example, converting "max-results" -> "max_results"
Args:
key: string, the method key name.
Returns:
A safe method name based on the key name.
"""
result = []
key = list(key)
if not key[0].isalpha():
result.append('x')
for c in key:
if c.isalnum():
result.append(c)
else:
result.append('_')
return ''.join(result)
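# Illustrative sketch (added): typical conversions performed by key2param when
# turning wire parameter names into Python identifiers.
#
#   key2param('max-results')  # -> 'max_results'
#   key2param('123abc')       # -> 'x123abc' (an 'x' is prefixed when the name
#                             #    does not start with a letter)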
def build(serviceName,
version,
http=None,
discoveryServiceUrl=DISCOVERY_URI,
developerKey=None,
model=None,
requestBuilder=HttpRequest):
"""Construct a Resource for interacting with an API.
Construct a Resource object for interacting with an API. The serviceName and
version are the names from the Discovery service.
Args:
serviceName: string, name of the service.
version: string, the version of the service.
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it that HTTP requests will be made through.
discoveryServiceUrl: string, a URI Template that points to the location of
the discovery service. It should have two parameters {api} and
{apiVersion} that when filled in produce an absolute URI to the discovery
document for that service.
developerKey: string, key obtained from
https://code.google.com/apis/console.
model: apiclient.Model, converts to and from the wire format.
requestBuilder: apiclient.http.HttpRequest, encapsulator for an HTTP
request.
Returns:
A Resource object with methods for interacting with the service.
"""
params = {
'api': serviceName,
'apiVersion': version
}
if http is None:
http = httplib2.Http()
requested_url = uritemplate.expand(discoveryServiceUrl, params)
# REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
# variable that contains the network address of the client sending the
# request. If it exists then add that to the request for the discovery
# document to avoid exceeding the quota on discovery requests.
if 'REMOTE_ADDR' in os.environ:
requested_url = _add_query_parameter(requested_url, 'userIp',
os.environ['REMOTE_ADDR'])
logger.info('URL being requested: %s' % requested_url)
resp, content = http.request(requested_url)
if resp.status == 404:
raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
version))
if resp.status >= 400:
raise HttpError(resp, content, requested_url)
try:
service = simplejson.loads(content)
except ValueError, e:
logger.error('Failed to parse as JSON: ' + content)
raise InvalidJsonError()
return build_from_document(content, discoveryServiceUrl, http=http,
developerKey=developerKey, model=model, requestBuilder=requestBuilder)
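# Hedged usage sketch for build() (added for illustration; the service name,
# collection and developer key below are assumptions, not taken from this file):
#
#   import httplib2
#   service = build('books', 'v1', http=httplib2.Http(), developerKey='MY_KEY')
#   request = service.volumes().list(q='python')   # hypothetical collection/method
#   response = request.execute()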
def build_from_document(
service,
base,
future=None,
http=None,
developerKey=None,
model=None,
requestBuilder=HttpRequest):
"""Create a Resource for interacting with an API.
  Same as `build()`, but constructs the Resource object from a discovery
  document that it is given, as opposed to retrieving one over HTTP.
Args:
service: string, discovery document.
base: string, base URI for all HTTP requests, usually the discovery URI.
future: string, discovery document with future capabilities (deprecated).
http: httplib2.Http, An instance of httplib2.Http or something that acts
like it that HTTP requests will be made through.
developerKey: string, Key for controlling API usage, generated
from the API Console.
model: Model class instance that serializes and de-serializes requests and
responses.
requestBuilder: Takes an http request and packages it up to be executed.
Returns:
A Resource object with methods for interacting with the service.
"""
# future is no longer used.
future = {}
service = simplejson.loads(service)
base = urlparse.urljoin(base, service['basePath'])
schema = Schemas(service)
if model is None:
features = service.get('features', [])
model = JsonModel('dataWrapper' in features)
resource = _createResource(http, base, model, requestBuilder, developerKey,
service, service, schema)
return resource
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
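# Illustrative sketch (added): how _cast stringifies values per JSON Schema type.
#
#   _cast(7, 'integer')     # -> '7'
#   _cast(2.5, 'number')    # -> '2.5'
#   _cast(True, 'boolean')  # -> 'true'
#   _cast(42, 'string')     # -> '42'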
MULTIPLIERS = {
"KB": 2 ** 10,
"MB": 2 ** 20,
"GB": 2 ** 30,
"TB": 2 ** 40,
}
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
maxSize: string, size as a string, such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if len(maxSize) < 2:
return 0
units = maxSize[-2:].upper()
multiplier = MULTIPLIERS.get(units, 0)
if multiplier:
return int(maxSize[:-2]) * multiplier
else:
return int(maxSize)
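# Illustrative sketch (added): expected conversions given the MULTIPLIERS table.
#
#   _media_size_to_long('10GB')  # -> 10 * 2**30 == 10737418240
#   _media_size_to_long('3MB')   # -> 3 * 2**20 == 3145728
#   _media_size_to_long('500')   # -> 500 (no recognized unit suffix)
#   _media_size_to_long('5')     # -> 0 (inputs shorter than two characters)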
def _createResource(http, baseUrl, model, requestBuilder,
developerKey, resourceDesc, rootDesc, schema):
"""Build a Resource from the API description.
Args:
http: httplib2.Http, Object to make http requests with.
baseUrl: string, base URL for the API. All requests are relative to this
URI.
model: apiclient.Model, converts to and from the wire format.
requestBuilder: class or callable that instantiates an
apiclient.HttpRequest object.
developerKey: string, key obtained from
https://code.google.com/apis/console
resourceDesc: object, section of deserialized discovery document that
describes a resource. Note that the top level discovery document
is considered a resource.
rootDesc: object, the entire deserialized discovery document.
schema: object, mapping of schema names to schema descriptions.
Returns:
An instance of Resource with all the methods attached for interacting with
that resource.
"""
class Resource(object):
"""A class for interacting with a resource."""
def __init__(self):
self._http = http
self._baseUrl = baseUrl
self._model = model
self._developerKey = developerKey
self._requestBuilder = requestBuilder
def createMethod(theclass, methodName, methodDesc, rootDesc):
"""Creates a method for attaching to a Resource.
Args:
theclass: type, the class to attach methods to.
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
"""
methodName = fix_method_name(methodName)
pathUrl = methodDesc['path']
httpMethod = methodDesc['httpMethod']
methodId = methodDesc['id']
mediaPathUrl = None
accept = []
maxSize = 0
if 'mediaUpload' in methodDesc:
mediaUpload = methodDesc['mediaUpload']
# TODO(user) Use URLs from discovery once it is updated.
parsed = list(urlparse.urlparse(baseUrl))
basePath = parsed[2]
mediaPathUrl = '/upload' + basePath + pathUrl
accept = mediaUpload['accept']
maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))
if 'parameters' not in methodDesc:
methodDesc['parameters'] = {}
# Add in the parameters common to all methods.
for name, desc in rootDesc.get('parameters', {}).iteritems():
methodDesc['parameters'][name] = desc
# Add in undocumented query parameters.
for name in STACK_QUERY_PARAMETERS:
methodDesc['parameters'][name] = {
'type': 'string',
'location': 'query'
}
if httpMethod in ['PUT', 'POST', 'PATCH'] and 'request' in methodDesc:
methodDesc['parameters']['body'] = {
'description': 'The request body.',
'type': 'object',
'required': True,
}
if 'request' in methodDesc:
methodDesc['parameters']['body'].update(methodDesc['request'])
else:
methodDesc['parameters']['body']['type'] = 'object'
if 'mediaUpload' in methodDesc:
methodDesc['parameters']['media_body'] = {
'description': 'The filename of the media request body.',
'type': 'string',
'required': False,
}
if 'body' in methodDesc['parameters']:
methodDesc['parameters']['body']['required'] = False
argmap = {} # Map from method parameter name to query parameter name
required_params = [] # Required parameters
repeated_params = [] # Repeated parameters
pattern_params = {} # Parameters that must match a regex
query_params = [] # Parameters that will be used in the query string
path_params = {} # Parameters that will be used in the base URL
param_type = {} # The type of the parameter
enum_params = {} # Allowable enumeration values for each parameter
if 'parameters' in methodDesc:
for arg, desc in methodDesc['parameters'].iteritems():
param = key2param(arg)
argmap[param] = arg
if desc.get('pattern', ''):
pattern_params[param] = desc['pattern']
if desc.get('enum', ''):
enum_params[param] = desc['enum']
if desc.get('required', False):
required_params.append(param)
if desc.get('repeated', False):
repeated_params.append(param)
if desc.get('location') == 'query':
query_params.append(param)
if desc.get('location') == 'path':
path_params[param] = param
param_type[param] = desc.get('type', 'string')
for match in URITEMPLATE.finditer(pathUrl):
for namematch in VARNAME.finditer(match.group(0)):
name = key2param(namematch.group(0))
path_params[name] = name
if name in query_params:
query_params.remove(name)
def method(self, **kwargs):
# Don't bother with doc string, it will be over-written by createMethod.
for name in kwargs.iterkeys():
if name not in argmap:
raise TypeError('Got an unexpected keyword argument "%s"' % name)
# Remove args that have a value of None.
keys = kwargs.keys()
for name in keys:
if kwargs[name] is None:
del kwargs[name]
for name in required_params:
if name not in kwargs:
raise TypeError('Missing required parameter "%s"' % name)
for name, regex in pattern_params.iteritems():
if name in kwargs:
if isinstance(kwargs[name], basestring):
pvalues = [kwargs[name]]
else:
pvalues = kwargs[name]
for pvalue in pvalues:
if re.match(regex, pvalue) is None:
raise TypeError(
'Parameter "%s" value "%s" does not match the pattern "%s"' %
(name, pvalue, regex))
for name, enums in enum_params.iteritems():
if name in kwargs:
# We need to handle the case of a repeated enum
# name differently, since we want to handle both
# arg='value' and arg=['value1', 'value2']
if (name in repeated_params and
not isinstance(kwargs[name], basestring)):
values = kwargs[name]
else:
values = [kwargs[name]]
for value in values:
if value not in enums:
raise TypeError(
'Parameter "%s" value "%s" is not an allowed value in "%s"' %
(name, value, str(enums)))
actual_query_params = {}
actual_path_params = {}
for key, value in kwargs.iteritems():
to_type = param_type.get(key, 'string')
# For repeated parameters we cast each member of the list.
if key in repeated_params and type(value) == type([]):
cast_value = [_cast(x, to_type) for x in value]
else:
cast_value = _cast(value, to_type)
if key in query_params:
actual_query_params[argmap[key]] = cast_value
if key in path_params:
actual_path_params[argmap[key]] = cast_value
body_value = kwargs.get('body', None)
media_filename = kwargs.get('media_body', None)
if self._developerKey:
actual_query_params['key'] = self._developerKey
model = self._model
# If there is no schema for the response then presume a binary blob.
if methodName.endswith('_media'):
model = MediaModel()
elif 'response' not in methodDesc:
model = RawModel()
headers = {}
headers, params, query, body = model.request(headers,
actual_path_params, actual_query_params, body_value)
expanded_url = uritemplate.expand(pathUrl, params)
url = urlparse.urljoin(self._baseUrl, expanded_url + query)
resumable = None
multipart_boundary = ''
if media_filename:
# Ensure we end up with a valid MediaUpload object.
if isinstance(media_filename, basestring):
(media_mime_type, encoding) = mimetypes.guess_type(media_filename)
if media_mime_type is None:
raise UnknownFileType(media_filename)
if not mimeparse.best_match([media_mime_type], ','.join(accept)):
raise UnacceptableMimeTypeError(media_mime_type)
media_upload = MediaFileUpload(media_filename, media_mime_type)
elif isinstance(media_filename, MediaUpload):
media_upload = media_filename
else:
raise TypeError('media_filename must be str or MediaUpload.')
# Check the maxSize
if maxSize > 0 and media_upload.size() > maxSize:
raise MediaUploadSizeError("Media larger than: %s" % maxSize)
# Use the media path uri for media uploads
expanded_url = uritemplate.expand(mediaPathUrl, params)
url = urlparse.urljoin(self._baseUrl, expanded_url + query)
if media_upload.resumable():
url = _add_query_parameter(url, 'uploadType', 'resumable')
if media_upload.resumable():
# This is all we need to do for resumable, if the body exists it gets
# sent in the first request, otherwise an empty body is sent.
resumable = media_upload
else:
# A non-resumable upload
if body is None:
# This is a simple media upload
headers['content-type'] = media_upload.mimetype()
body = media_upload.getbytes(0, media_upload.size())
url = _add_query_parameter(url, 'uploadType', 'media')
else:
# This is a multipart/related upload.
msgRoot = MIMEMultipart('related')
            # msgRoot should not write out its own headers
setattr(msgRoot, '_write_headers', lambda self: None)
# attach the body as one part
msg = MIMENonMultipart(*headers['content-type'].split('/'))
msg.set_payload(body)
msgRoot.attach(msg)
# attach the media as the second part
msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
msg['Content-Transfer-Encoding'] = 'binary'
payload = media_upload.getbytes(0, media_upload.size())
msg.set_payload(payload)
msgRoot.attach(msg)
body = msgRoot.as_string()
multipart_boundary = msgRoot.get_boundary()
headers['content-type'] = ('multipart/related; '
'boundary="%s"') % multipart_boundary
url = _add_query_parameter(url, 'uploadType', 'multipart')
logger.info('URL being requested: %s' % url)
return self._requestBuilder(self._http,
model.response,
url,
method=httpMethod,
body=body,
headers=headers,
methodId=methodId,
resumable=resumable)
docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
if len(argmap) > 0:
docs.append('Args:\n')
# Skip undocumented params and params common to all methods.
skip_parameters = rootDesc.get('parameters', {}).keys()
      skip_parameters.extend(STACK_QUERY_PARAMETERS)
for arg in argmap.iterkeys():
if arg in skip_parameters:
continue
repeated = ''
if arg in repeated_params:
repeated = ' (repeated)'
required = ''
if arg in required_params:
required = ' (required)'
paramdesc = methodDesc['parameters'][argmap[arg]]
paramdoc = paramdesc.get('description', 'A parameter')
if '$ref' in paramdesc:
docs.append(
(' %s: object, %s%s%s\n The object takes the'
' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
schema.prettyPrintByName(paramdesc['$ref'])))
else:
paramtype = paramdesc.get('type', 'string')
docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
repeated))
enum = paramdesc.get('enum', [])
enumDesc = paramdesc.get('enumDescriptions', [])
if enum and enumDesc:
docs.append(' Allowed values\n')
for (name, desc) in zip(enum, enumDesc):
docs.append(' %s - %s\n' % (name, desc))
if 'response' in methodDesc:
if methodName.endswith('_media'):
docs.append('\nReturns:\n The media object as a string.\n\n ')
else:
docs.append('\nReturns:\n An object of the form:\n\n ')
docs.append(schema.prettyPrintSchema(methodDesc['response']))
setattr(method, '__doc__', ''.join(docs))
setattr(theclass, methodName, method)
def createNextMethod(theclass, methodName, methodDesc, rootDesc):
"""Creates any _next methods for attaching to a Resource.
The _next methods allow for easy iteration through list() responses.
Args:
theclass: type, the class to attach methods to.
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
"""
methodName = fix_method_name(methodName)
methodId = methodDesc['id'] + '.next'
def methodNext(self, previous_request, previous_response):
"""Retrieves the next page of results.
Args:
previous_request: The request for the previous page.
previous_response: The response from the request for the previous page.
Returns:
A request object that you can call 'execute()' on to request the next
page. Returns None if there are no more items in the collection.
"""
# Retrieve nextPageToken from previous_response
# Use as pageToken in previous_request to create new request.
if 'nextPageToken' not in previous_response:
return None
request = copy.copy(previous_request)
pageToken = previous_response['nextPageToken']
parsed = list(urlparse.urlparse(request.uri))
q = parse_qsl(parsed[4])
# Find and remove old 'pageToken' value from URI
newq = [(key, value) for (key, value) in q if key != 'pageToken']
newq.append(('pageToken', pageToken))
parsed[4] = urllib.urlencode(newq)
uri = urlparse.urlunparse(parsed)
request.uri = uri
logger.info('URL being requested: %s' % uri)
return request
setattr(theclass, methodName, methodNext)
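  # Hedged usage sketch of the generated *_next methods (added for illustration;
  # the 'files' collection and 'process' helper below are assumptions):
  #
  #   request = service.files().list()
  #   while request is not None:
  #       response = request.execute()
  #       for item in response.get('items', []):
  #           process(item)
  #       request = service.files().list_next(request, response)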
# Add basic methods to Resource
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
createMethod(Resource, methodName, methodDesc, rootDesc)
# Add in _media methods. The functionality of the attached method will
# change when it sees that the method name ends in _media.
if methodDesc.get('supportsMediaDownload', False):
createMethod(Resource, methodName + '_media', methodDesc, rootDesc)
# Add in nested resources
if 'resources' in resourceDesc:
def createResourceMethod(theclass, methodName, methodDesc, rootDesc):
"""Create a method on the Resource to access a nested Resource.
Args:
theclass: type, the class to attach methods to.
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
"""
methodName = fix_method_name(methodName)
def methodResource(self):
return _createResource(self._http, self._baseUrl, self._model,
self._requestBuilder, self._developerKey,
methodDesc, rootDesc, schema)
setattr(methodResource, '__doc__', 'A collection resource.')
setattr(methodResource, '__is_resource__', True)
setattr(theclass, methodName, methodResource)
for methodName, methodDesc in resourceDesc['resources'].iteritems():
createResourceMethod(Resource, methodName, methodDesc, rootDesc)
# Add _next() methods
# Look for response bodies in schema that contain nextPageToken, and methods
# that take a pageToken parameter.
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
if 'response' in methodDesc:
responseSchema = methodDesc['response']
if '$ref' in responseSchema:
responseSchema = schema.get(responseSchema['$ref'])
hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
{})
hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
if hasNextPageToken and hasPageToken:
createNextMethod(Resource, methodName + '_next',
resourceDesc['methods'][methodName],
                           rootDesc)
return Resource()
| palladius/gcloud | packages/gcutil-1.7.1/lib/google_api_python_client/apiclient/discovery.py | Python | gpl-3.0 | 26,209 |
import datetime
import pandas as pd
df = pd.io.parsers.read_csv("labdata_2014-07-10_2.csv")
grouped = df.groupby(['patient', 'lab_id'])
for name, group in grouped:
m = group.lab_dmy.dropna().min()
group['diff2'] = pd.to_datetime(group['lab_dmy']) - pd.to_datetime(m)
group.to_csv("out3.csv", mode="a", header=False)
| davidmatten/eva | prepare_data/transform/transform_v0.1.py | Python | gpl-2.0 | 359 |
# -*- coding: utf-8 -*-
from eclcli.common import command
from eclcli.common import exceptions
from eclcli.common import utils
from eclcli.identity import common as identity_common
from ..sssclient.common.utils import objectify
class ListContract(command.Lister):
def get_parser(self, prog_name):
parser = super(ListContract, self).get_parser(prog_name)
parser.add_argument(
'channel_id',
metavar="<uuid>",
help=("Target channel_id under own contract.")
)
parser.add_argument(
'--include_deleted',
metavar="<boolean>",
type=utils.parse_bool,
choices=[True, False],
help="default is false. (true : Include deleted contract "
"/ false: exclude deleted contract."
)
return parser
def take_action(self, parsed_args):
sss_client = self.app.client_manager.sss
columns = (
)
column_headers = (
)
channel_id = parsed_args.channel_id
data = [objectify(contract)
for contract in sss_client.list_contracts(channel_id).get('contracts')]
return (column_headers,
(utils.get_item_properties(
s, columns,
) for s in data))
class ShowContract(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowContract, self).get_parser(prog_name)
parser.add_argument(
'contract_id',
metavar="<uuid>",
help="The contract ID getting more information"
)
return parser
def take_action(self, parsed_args):
sss_client = self.app.client_manager.sss
contract_id = parsed_args.contract_id
contract = sss_client.show_contract(contract_id)
columns = utils.get_columns(contract)
obj = objectify(contract)
data = utils.get_item_properties(obj, columns)
return columns, data
class CreateContract(command.ShowOne):
def get_parser(self, prog_name):
parser = super(CreateContract, self).get_parser(prog_name)
parser.add_argument(
'login_id',
metavar='<login_id>',
help='Login id of new user.'
)
parser.add_argument(
'mail_address',
metavar='<mail_address>',
help='Mail address of new user.'
)
parser.add_argument(
'channel_id',
metavar='<uuid>',
            help=('The channel is the group used to manage contracts. '
                  'The partner user will be given 2 channels. '
                  'One is the channel that contains the partner\'s own contract. '
                  'The other is the channel that contains all end user'
                  ' contracts which the partner user has. '
                  'By executing the List Channel API (for partner users only), '
                  'the user can get their own (and the end users\') channel ID.')
)
parser.add_argument(
'--notify_password',
metavar='<boolean>',
type=utils.parse_bool,
choices=[True, False],
            help='If this flag is set \'true\', a notification email will be '
                 'sent to the new user\'s email address.'
)
parser.add_argument(
'--external_reference_id',
metavar='<uuid>',
            help=('By using this item, the partner API user can associate an '
                  'optional string with the contract (e.g. the end user management '
                  'ID in the partner user\'s system). '
                  'Note that this ID will NOT be used to control the contract '
                  'in the ECL2.0 internal system. '
                  'If the item is left blank, the ECL2.0 system sets the end user\'s'
                  ' contract ID automatically (e.g. econXXXXXXXX).')
)
parser.add_argument(
'--password',
metavar='<password>',
            help=('Initial password of new user. '
                  'If this parameter is not designated, a random initial password is '
                  'generated and applied to the new user.')
)
return parser
def take_action(self, parsed_args):
sss_client = self.app.client_manager.sss
body = {}
if parsed_args.login_id is not None:
body['login_id'] = str(parsed_args.login_id)
if parsed_args.mail_address is not None:
body['mail_address'] = str(parsed_args.mail_address)
if parsed_args.channel_id is not None:
body['channel_id'] = str(parsed_args.channel_id)
if parsed_args.notify_password is not None:
body['notify_password'] = parsed_args.notify_password
if parsed_args.password is not None:
body['password'] = str(parsed_args.password)
if parsed_args.external_reference_id is not None:
body['external_reference_id'] = str(parsed_args.external_reference_id)
contract = sss_client.create_contract(body)
columns = utils.get_columns(contract)
obj = objectify(contract)
data = utils.get_item_properties(obj, columns)
return (columns, data)
class DeleteContract(command.Command):
def get_parser(self, prog_name):
parser = super(DeleteContract, self).get_parser(prog_name)
parser.add_argument(
'contract_id',
metavar="<uuid>",
nargs="+",
help=("Contract ID of Delete target")
)
return parser
def take_action(self, parsed_args):
sss_client = self.app.client_manager.sss
for contract_id in parsed_args.contract_id:
sss_client.delete_contract(contract_id)
class ShowBilling(command.ShowOne):
def get_parser(self, prog_name):
parser = super(ShowBilling, self).get_parser(prog_name)
parser.add_argument(
'contract_id',
metavar="<uuid>",
help=("The contract ID getting more information")
)
parser.add_argument(
'target_month',
metavar="<target_month>",
help=("target billing month with YYYY-MM format")
)
return parser
def take_action(self, parsed_args):
sss_client = self.app.client_manager.sss
contract_id = parsed_args.contract_id
target_month = parsed_args.target_month
billing = sss_client.show_billing(contract_id, target_month)
columns = utils.get_columns(billing)
obj = objectify(billing)
data = utils.get_item_properties(obj, columns)
return (columns, data)
| anythingrandom/eclcli | eclcli/sss/v1/contract.py | Python | apache-2.0 | 6,670 |
import pickle
import datetime
import ml.shallow_model as sm
import ml.deep_model as dl
import data_gen as dg
import sys
import numpy as np
import threading
parameter_class = 'HRV,SPE,MOR(3)R'
threshold = 1.3
fold = 5
mode = parameter_class + '_' + str(threshold) + '_' + str(fold)
def pos_counter(Y):
cnt = 0
for y in Y:
if y == 0:
cnt += 1
return cnt, len(Y) - cnt
if __name__ == '__main__':
sys.setrecursionlimit(10000)
print(mode)
info_list = dg.gen_info_pickle()
cv_list = dg.cross_validation_DS_gen(info_list, fold)
for i in range(fold):
train_list = []
for j in range(fold):
if i == j: continue
train_list += cv_list[j]
valid_list = cv_list[i]
X_train, param, _ = dg.gen_x(train_list, ['BAS', 'HRV', 'SPE', 'MOR'], '')
X_test, _, _ = dg.gen_x(valid_list, ['BAS', 'HRV', 'SPE', 'MOR'], '')
print('Use ' + str(len(param)) + ' features!')
pen = open('rubbish/params.csv', 'a')
sentence = '\n' + mode
for p in param:
sentence += ',' + p
pen.write(sentence)
pen.close()
Y_train, cnt = dg.gen_y_pi(train_list, threshold)
Y_test, cnt_test = dg.gen_y_pi(valid_list, threshold)
X_train, Y_train = dg.pos_neg_regulator(X_train, Y_train, cnt, len(Y_train) - cnt)
p, n = pos_counter(Y_train)
pp, nn = pos_counter(Y_test)
print('Y_train: ' + str(p) + '\t' + str(n))
print('Y_test: ' + str(pp) + '\t' + str(nn))
X_train = np.array(X_train)
X_test = np.array(X_test)
Y_train = np.array(Y_train)
Y_test = np.array(Y_test)
line = mode + ',' + str(i) +',' + sm.rf_train_test(X_train, X_test, Y_train, Y_test)
pen = open('CVRes.csv', 'a')
pen.write(line + '\n')
pen.close()
line = mode + ',' + str(i) + ',' + sm.lr_train_test(X_train, X_test, Y_train, Y_test)
pen = open('CVRes.csv', 'a')
pen.write(line + '\n')
pen.close()
line = mode + ',' + str(i) + ',' + sm.gb_train_test(X_train, X_test, Y_train, Y_test)
pen = open('CVRes.csv', 'a')
pen.write(line + '\n')
pen.close()
line = mode + ',' + str(i) + ',' + dl.dnn_train_test(X_train, X_test, Y_train, Y_test)
pen = open('CVRes.csv', 'a')
pen.write(line + '\n')
pen.close()
| RichardLeeK/MachineLearning | MachineLearning/CBFV/CBFVCV.py | Python | mit | 2,229 |
# vi: ts=8 sts=4 sw=4 et
#
# support.py: test support for draco2.model
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import os
from draco2.model.test.shopmodel import ShopModel
from draco2.database.test.support import DatabaseTest
class ModelTest(DatabaseTest):
"""Base class for model tests."""
def setup_method(cls, method):
super(ModelTest, cls).setup_method(method)
cls.model = ShopModel(cls.database)
cls.schema = cls.model.schema()
cls.schema.drop()
cls.schema.create()
def teardown_method(cls, method):
cls.model._finalize()
cls.schema.drop()
super(ModelTest, cls).teardown_method(method)
| geertj/draco2 | draco2/model/test/support.py | Python | mit | 972 |
# -*- coding: utf-8 -*-
import subprocess
import sys
import os.path
import re
import math
import spot
from IPython.display import SVG
from datetime import datetime
import pandas as pd
from experiments_lib import hoa_to_spot, dot_to_svg, pretty_print
def bogus_to_lcr(form):
"""Converts a formula as it is printed in ``_bogus.ltl`` file
(uses ``--relabel=abc``) to use ``pnn`` AP names.
"""
args = ['-r0','--relabel=pnn','-f',form]
return subprocess.check_output(["ltlfilt"] + args, universal_newlines=True).strip()
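# Hedged usage sketch (added): requires Spot's `ltlfilt` binary on the PATH.
#
#   bogus_to_lcr('G(a -> Fb)')
#   # -> the same formula with atomic propositions relabelled to p0, p1, ...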
def parse_check_log(log_f):
"""Parses a given log file and locates cases where
sanity checks found some error.
Returns:
bugs: a dict: ``form_id``->``list of error lines``
bogus_forms: a dict: ``form_id``->``form``
tools: a dict: ``tool_id``->``command``
"""
log = open(log_f,'r')
bugs = {}
bogus_forms = {}
formula = re.compile('.*ltl:(\d+): (.*)$')
empty_line = re.compile('^\s$')
problem = re.compile('error: .* nonempty')
for line in log:
m_form = formula.match(line)
if m_form:
form = m_form
f_bugs = []
m_empty = empty_line.match(line)
if m_empty:
if len(f_bugs) > 0:
form_id = int(form.group(1))-1
bugs[form_id] = f_bugs
bogus_forms[form_id] = form.group(2)
m_prob = problem.match(line)
if m_prob:
f_bugs.append(m_prob.group(0))
log.close()
tools = parse_log_tools(log_f)
return bugs, bogus_forms, tools
def find_log_for(tool_code, form_id, log_f):
"""Returns an array of lines from log for
given tool code (P1,N3,...) and form_id. The
form_id is taken from runner - thus we search for
formula number ``form_id+1``
"""
log = open(log_f,'r')
current_f = -1
formula = re.compile('.*ltl:(\d+): (.*)$')
tool = re.compile('.*\[([PN]\d+)\]: (.*)$')
gather = re.compile('Performing sanity checks and gathering statistics')
output = []
for line in log:
m_form = formula.match(line)
if m_form:
current_f = int(m_form.group(1))
curr_tool = ''
if current_f < form_id+1:
continue
if current_f > form_id+1:
break
m_tool = tool.match(line)
if m_tool:
curr_tool = m_tool.group(1)
if gather.match(line):
curr_tool = 'end'
if curr_tool == tool_code:
output.append(line.strip())
log.close()
return output
def hunt_error_types(log_f):
log = open(log_f,'r')
errors = {}
err_forms = {}
formula = re.compile('.*ltl:(\d+): (.*)$')
empty_line = re.compile('^\s$')
tool = re.compile('.*\[([PN]\d+)\]: (.*)$')
problem = re.compile('error: .*')
nonempty = re.compile('error: (.*) is nonempty')
for line in log:
m_form = formula.match(line)
if m_form:
form = m_form
f_bugs = {}
m_tool = tool.match(line)
if m_tool:
tid = m_tool.group(1)
m_empty = empty_line.match(line)
if m_empty:
if len(f_bugs) > 0:
form_id = int(form.group(1))-1
errors[form_id] = f_bugs
err_forms[form_id] = form.group(2)
m_prob = problem.match(line)
if m_prob:
prob = m_prob.group(0)
m_bug = nonempty.match(line)
if m_bug:
prob = 'nonempty'
tid = m_bug.group(1)
if prob not in f_bugs:
f_bugs[prob] = []
f_bugs[prob].append(tid)
log.close()
tools = parse_log_tools(log_f)
return errors, err_forms, tools
def parse_log_tools(log_f):
log = open(log_f,'r')
tools = {}
tool = re.compile('.*\[(P\d+)\]: (.*)$')
empty_line = re.compile('^\s$')
for line in log:
m_tool = tool.match(line)
m_empty = empty_line.match(line)
if m_empty:
break
if m_tool:
tid = m_tool.group(1)
tcmd = m_tool.group(2)
tools[tid] = tcmd
log.close()
return tools
class LtlcrossRunner(object):
"""A class for running Spot's `ltlcross` and storing and manipulating
its results. For LTL3HOA it can also draw very weak alternating automata
(VWAA).
Parameters
----------
tools : a dict (String -> String)
The records in the dict of the form ``name : ltlcross_cmd``
>>> tools = {"LTL3HOA" : "ltl3hoa -d -x -i -p 2 -f %f > %O",
>>> "SPOT": : "ltl2tgba"
>>> }
formula_files : a list of strings
paths to files with formulas to be fed to `ltlcross`
res_filename : String
filename to store the ltlcross`s results
cols : list of Strings, default ``['states','edges','transitions']``
names of ltlcross's statistics columns to be recorded
"""
def __init__(self, tools,
formula_files=['formulae/classic.ltl'],
res_filename='na_comp.csv',
cols=['states', 'edges', 'transitions'],
log_file=None,
):
self.tools = tools
self.mins = []
self.f_files = formula_files
self.cols = cols.copy()
self.automata = None
self.values = None
self.form = None
if res_filename == '' or res_filename is None:
self.res_file = '_'.join(tools.keys()) + '.csv'
else:
self.res_file = res_filename
if log_file is None:
self.log_file = self.res_file[:-3] + 'log'
else:
self.log_file = log_file
def create_args(self, automata=True, check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
forms = True, escape_tools=False):
"""Creates args that are passed to run_ltlcross
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
### Prepare ltlcross command ###
tools_strs = ["{"+name+"}" + cmd for (name, cmd) in self.tools.items() if name in tool_subset]
if escape_tools:
tools_strs = ["'{}'".format(t_str) for t_str in tools_strs]
args = tools_strs
if forms:
args += ' '.join(['-F '+F for F in self.f_files]).split()
if timeout:
args.append('--timeout='+timeout)
if automata:
args.append('--automata')
if save_bogus:
args.append('--save-bogus={}_bogus.ltl'.format(res_file[:-4]))
if not check:
args.append('--no-checks')
#else:
# args.append('--reference={ref_Spot}ltl2tgba -H %f')
args.append('--products=0')
args.append('--csv='+res_file)
return args
def ltlcross_cmd(self, args=None, automata=True,
check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
forms=True, lcr='ltlcross'):
"""Returns ltlcross command for the parameters.
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
if args is None:
args = self.create_args(automata, check, timeout,
log_file, res_file,
save_bogus, tool_subset, forms,
escape_tools=True)
return ' '.join([lcr] + args)
def run_ltlcross(self, args=None, automata=True,
check=False, timeout='300',
log_file=None, res_file=None,
save_bogus=True, tool_subset=None,
lcr='ltlcross'):
"""Removes any older version of ``self.res_file`` and runs `ltlcross`
on all tools.
Parameters
----------
args : a list of ltlcross arguments that can be used for subprocess
tool_subset : a list of names from self.tools
"""
if log_file is None:
log_file = self.log_file
if res_file is None:
res_file = self.res_file
if tool_subset is None:
tool_subset=self.tools.keys()
if args is None:
args = self.create_args(automata, check, timeout,
log_file, res_file,
save_bogus, tool_subset)
# Delete ltlcross result and lof files
subprocess.call(["rm", "-f", res_file, log_file])
## Run ltlcross ##
log = open(log_file,'w')
cmd = self.ltlcross_cmd(args,lcr=lcr)
print(cmd, file=log)
print(datetime.now().strftime('[%d.%m.%Y %T]'), file=log)
print('=====================', file=log,flush=True)
self.returncode = subprocess.call([lcr] + args, stderr=subprocess.STDOUT, stdout=log)
log.writelines([str(self.returncode)+'\n'])
log.close()
def parse_results(self, res_file=None):
"""Parses the ``self.res_file`` and sets the values, automata, and
form. If there are no results yet, it runs ltlcross before.
"""
if res_file is None:
res_file = self.res_file
if not os.path.isfile(res_file):
raise FileNotFoundError(res_file)
res = pd.read_csv(res_file)
# Add incorrect columns to track flawed automata
if not 'incorrect' in res.columns:
res['incorrect'] = False
# Removes unnecessary parenthesis from formulas
res.formula = res['formula'].map(pretty_print)
form = pd.DataFrame(res.formula.drop_duplicates())
form['form_id'] = range(len(form))
form.index = form.form_id
res = form.merge(res)
# Shape the table
table = res.set_index(['form_id', 'formula', 'tool'])
table = table.unstack(2)
table.axes[1].set_names(['column','tool'],inplace=True)
# Create separate tables for automata
automata = None
if 'automaton' in table.columns.levels[0]:
automata = table[['automaton']]
# Removes formula column from the index
automata.index = automata.index.levels[0]
# Removes `automata` from column names -- flatten the index
automata.columns = automata.columns.levels[1]
form = form.set_index(['form_id', 'formula'])
# Store incorrect and exit_status information separately
self.incorrect = table[['incorrect']]
self.incorrect.columns = self.incorrect.columns.droplevel()
self.exit_status = table[['exit_status']]
self.exit_status.columns = self.exit_status.columns.droplevel()
# stores the followed columns only
values = table[self.cols]
self.form = form
self.values = values.sort_index(axis=1,level=['column','tool'])
# self.compute_best("Minimum")
if automata is not None:
self.automata = automata
def compute_sbacc(self,col='states'):
def get_sbacc(aut):
if isinstance(aut, float) and math.isnan(aut):
return None
a = spot.automata(aut+'\n')
aut = next(a)
aut = spot.sbacc(aut)
if col == 'states':
return aut.num_states()
if col == 'acc':
return aut.num_sets()
df = self.automata.copy()
# Recreate the same index as for other cols
n_i = [(l, self.form_of_id(l,False)) for l in df.index]
df.index = pd.MultiIndex.from_tuples(n_i)
df.index.names=['form_id','formula']
# Recreate the same columns hierarchy
df = df.T
df['column'] = 'sb_{}'.format(col)
self.cols.append('sb_{}'.format(col))
df = df.set_index(['column'],append=True)
df = df.T.swaplevel(axis=1)
# Compute the requested values and add them to others
df = df.applymap(get_sbacc)
self.values = self.values.join(df)
def compute_best(self, tools=None, colname="Minimum"):
"""Computes minimum values over tools in ``tools`` for all
formulas and stores them in column ``colname``.
Parameters
----------
tools : list of Strings
column names that are used to compute the min over
colname : String
name of column used to store the computed values
"""
if tools is None:
tools = list(self.tools.keys())
else:
tools = [t for t in tools if t in self.tools.keys()
or t in self.mins]
self.mins.append(colname)
for col in self.cols:
self.values[col, colname] = self.values[col][tools].min(axis=1)
self.values.sort_index(axis=1, level=0, inplace=True)
def aut_for_id(self, form_id, tool):
"""For given formula id and tool it returns the corresponding
non-deterministic automaton as a Spot's object.
Parameters
----------
form_id : int
id of formula to use
tool : String
name of the tool to use to produce the automaton
"""
if self.automata is None:
raise AssertionError("No results parsed yet")
if tool not in self.tools.keys():
raise ValueError(tool)
return hoa_to_spot(self.automata.loc[form_id, tool])
def cummulative(self, col="states"):
"""Returns table with cummulative numbers of given ``col``.
Parameters
---------
col : String
One of the followed columns (``states`` default)
"""
return self.values[col].dropna().sum()
def smaller_than(self, t1, t2, reverse=False,
restrict=True,
col='states', restrict_cols=True):
"""Returns a dataframe with results where ``col`` for ``tool1``
has strictly smaller value than ``col`` for ``tool2``.
Parameters
----------
t1 : String
name of tool for comparison (the better one)
must be among tools
t2 : String
name of tool for comparison (the worse one)
must be among tools
reverse : Boolean, default ``False``
if ``True``, it switches ``tool1`` and ``tool2``
restrict : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
tools
col : String, default ``'states'``
name of column use for comparison.
restrict_cols : Boolean, default ``True``
if ``True``, show only the compared column
"""
return self.better_than(t1,t2,reverse=reverse,
props=[col],include_fails=False,
restrict_cols=restrict_cols,
restrict_tools=restrict)
def better_than(self, t1, t2, props=['states','acc'],
reverse=False, include_fails=True,
restrict_cols=True,restrict_tools=True
):
"""Compares ``t1`` against ``t2`` lexicographicaly
on cols from ``props`` and returns DataFrame with
results where ``t1`` is better than ``t2``.
Parameters
----------
t1 : String
name of tool for comparison (the better one)
must be among tools
t2 : String
name of tool for comparison (the worse one)
must be among tools
props : list of Strings, default (['states','acc'])
list of columns on which we want the comparison (in order)
reverse : Boolean, default ``False``
if ``True``, it switches ``t1`` and ``t2``
include_fails : Boolean, default ``True``
if ``True``, include formulae where t2 fails and t1 does not
fail
restrict_cols : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
property columns
restrict_tools : Boolean, default ``True``
if ``True``, the returned DataFrame contains only the compared
tools
"""
if t1 not in list(self.tools.keys())+self.mins:
raise ValueError(t1)
if t2 not in list(self.tools.keys())+self.mins:
raise ValueError(t2)
if reverse:
t1, t2 = t2, t1
v = self.values
t1_ok = self.exit_status[t1] == 'ok'
if include_fails:
t2_ok = self.exit_status[t2] == 'ok'
# non-fail beats fail
c = v[t1_ok & ~t2_ok]
# We work on non-failures only from now on
eq = t1_ok & t2_ok
else:
c = pd.DataFrame()
eq = t1_ok
for prop in props:
# For each prop we add t1 < t2
better = v[prop][t1] < v[prop][t2]
# but only from those which were equivalent so far
equiv_and_better = v.loc[better & eq]
c = c.append(equiv_and_better)
# And now choose those equivalent also on prop to eq
eq = eq & (v[prop][t1] == v[prop][t2])
# format the output
idx = pd.IndexSlice
tools = [t1,t2] if restrict_tools else slice(None)
props = props if restrict_cols else slice(None)
return c.loc[:,idx[props,tools]]
def form_of_id(self, form_id, spot_obj=True):
"""For given form_id returns the formula
Parameters
----------
form_id : int
id of formula to return
spot_obj : Bool
If ``True``, returns Spot formula object (uses Latex to
print the formula in Jupyter notebooks)
"""
f = self.values.index[form_id][1]
if spot_obj:
return spot.formula(f)
return f
def id_of_form(self, f, convert=False):
"""Returns id of a given formula. If ``convert`` is ``True``
it also calls ``bogus_to_lcr`` first.
"""
if convert:
f = bogus_to_lcr(f)
ni = self.values.index.droplevel(0)
return ni.get_loc(f)
def mark_incorrect(self, form_id, tool,output_file=None,input_file=None):
"""Marks automaton given by the formula id and tool as flawed
and writes it into the .csv file
"""
if tool not in self.tools.keys():
raise ValueError(tool)
# Put changes into the .csv file
if output_file is None:
output_file = self.res_file
if input_file is None:
input_file = self.res_file
csv = pd.read_csv(input_file)
if not 'incorrect' in csv.columns:
csv['incorrect'] = False
cond = (csv['formula'].map(pretty_print) ==
pretty_print(self.form_of_id(form_id,False))) &\
(csv.tool == tool)
csv.loc[cond,'incorrect'] = True
csv.to_csv(output_file,index=False)
# Mark the information into self.incorrect
self.incorrect.loc[self.index_for(form_id)][tool] = True
def na_incorrect(self):
"""Marks values for flawed automata as N/A. This causes
that the touched formulae will be removed from cummulative
etc. if computed again. To reverse this information you
have to parse the results again.
It also sets ``exit_status`` to ``incorrect``
"""
self.values = self.values[~self.incorrect]
self.exit_status[self.incorrect] = 'incorrect'
def index_for(self, form_id):
return (form_id,self.form_of_id(form_id,False))
def get_error_count(self,err_type='timeout',drop_zeros=True):
"""Returns a Series with total number of er_type errors for
each tool.
Parameters
----------
err_type : String one of `timeout`, `parse error`,
`incorrect`, `crash`, or
'no output'
Type of error we seek
drop_zeros: Boolean (default True)
If true, rows with zeros are removed
"""
if err_type not in ['timeout', 'parse error',
'incorrect', 'crash',
'no output']:
raise ValueError(err_type)
if err_type == 'crash':
c1 = self.exit_status == 'exit code'
c2 = self.exit_status == 'signal'
res = (c1 | c2).sum()
else:
res = (self.exit_status == err_type).sum()
if drop_zeros:
return res.iloc[res.nonzero()]
return res
def cross_compare(self,tools=None,props=['states','acc'],
include_fails=True, total=True,
include_other=True):
def count_better(tool1,tool2):
if tool1 == tool2:
return float('nan')
try:
return len(self.better_than(tool1,tool2,props,
include_fails=include_fails))
except ValueError as e:
if include_other:
return float('nan')
else:
raise e
if tools is None:
tools = self.tools.keys()
c = pd.DataFrame(index=tools, columns=tools).fillna(0)
for tool in tools:
c[tool] = pd.DataFrame(c[tool]).apply(lambda x:
count_better(x.name,tool), 1)
if total:
c['V'] = c.sum(axis=1)
return c
def min_counts(self, tools=None, restrict_tools=False, unique_only=False, col='states',min_name='min(count)'):
if tools is None:
tools = list(self.tools.keys())
else:
tools = [t for t in tools if
t in self.tools.keys() or
t in self.mins]
min_tools = tools if restrict_tools else list(self.tools.keys())
self.compute_best(tools=min_tools, colname=min_name)
s = self.values.loc(axis=1)[col]
df = s.loc(axis=1)[tools+[min_name]]
is_min = lambda x: x[x == x[min_name]]
best_t_count = df.apply(is_min, axis=1).count(axis=1)
choose = (df[best_t_count == 2]) if unique_only else df
choose = choose.index
min_counts = df.loc[choose].apply(is_min,axis=1).count()
return pd.DataFrame(min_counts[min_counts.index != min_name])
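# Hedged usage sketch of LtlcrossRunner (added for illustration; the tool
# command strings are copied from the class docstring and require the listed
# binaries plus ltlcross to be installed):
#
#   tools = {"LTL3HOA": "ltl3hoa -d -x -i -p 2 -f %f > %O",
#            "SPOT": "ltl2tgba"}
#   r = LtlcrossRunner(tools, formula_files=['formulae/classic.ltl'],
#                      res_filename='na_comp.csv')
#   r.run_ltlcross()     # runs ltlcross, writes the .csv and .log files
#   r.parse_results()    # fills r.values, r.form, and r.automata
#   print(r.cummulative(col='states'))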
def param_runner(name, tools, data_dir='data_param'):
cols=["states","transitions","acc","time","nondet_states"]
r = LtlcrossRunner(tools,\
res_filename='{}/{}.csv'.format(data_dir,name),\
formula_files=['formulae/{}.ltl'.format(name)],\
cols=cols)
return r | jurajmajor/ltl3hoa | Experiments/ltlcross_runner.py | Python | gpl-3.0 | 23,066 |
import sys
import pdb
from numpy import *
import math
import argparse
parser = argparse.ArgumentParser(description='Analysis and assessment of variant information content (vic)')
parser.add_argument('inputfile', metavar='INPUTFILE', type=str, help='The input file')
parser.add_argument('--fcol', type=int, default=4, help='The column number where the features start')
parser.add_argument('--regcol', type=int, default=3, help='The column specifying the region')
args = parser.parse_args()
ifile = open(args.inputfile)
freqfile = open(args.inputfile+'.freq', 'w')
vicfile = open(args.inputfile+'.vic', 'w')
header = True
region_values = {}
convert = {'str':lambda x:str(x), 'int':lambda x:int(x), 'float':lambda x:float(x)}
lines = ifile.readlines()
for nl, line in enumerate(lines):
line = line.strip()
if line[0] != '#' and len(line) > 0:
fields = line.split('\t')
if header:
numfeatures = len(fields[args.fcol:])
types = ['str']*numfeatures
values = []
freqs = {}
vic_fields = []
values = []
for i, feature in enumerate(fields[args.fcol:]):
if ':' in feature:
types[i] = feature.split(':')[-1]
header = False
else:
region = fields[args.regcol]
if region in region_values:
for i, feature in enumerate(fields[args.fcol:]):
region_values[region][1][i].append(convert[types[i]](feature))
region_values[region][0].append(fields[:3])
else:
region_values[region] = [[fields[:3]], []]
for i, feature in enumerate(fields[args.fcol:]):
region_values[region][1].append([convert[types[i]](feature)])
for region in region_values:
vic_fields = region_values[region][0]
values = region_values[region][1]
bin_edges = []
tuples = []
freqs = {}
for i in range(numfeatures):
bin_edges.append([])
for i in range(numfeatures):
if types[i] == 'int' or types[i] == 'float':
hist, bin_edges[i] = histogram(values[i])
for i in range(len(values[0])):
tuplist = []
for j in range(numfeatures):
if types[j] == 'int' or types[j] == 'float':
idx = -1
for k in range(len(bin_edges[j])):
if values[j][i] > bin_edges[j][k]:
idx = k-1
break
tuplist.append(bin_edges[j][idx])
else:
tuplist.append(values[j][i])
tuples.append(tuple(tuplist))
for t in tuples:
if t not in freqs:
count = 0.
for t2 in tuples:
if t == t2:
count += 1
freqs[t] = count/len(tuples)
for i,t in enumerate(tuples):
p = freqs[t]
vicfile.write('%s\t%s\t%s\t' % (vic_fields[i][0], vic_fields[i][1], vic_fields[i][2]))
vicfile.write('%s\t%s\n' % (region, -p*math.log(p, 2)))
freq_output = region + '\t'
for tup in freqs:
freq = freqs[tup]
freq_output += ' %s:%s' % (str(tup).strip('()'), freq)
freqfile.write(freq_output + '\n')
vicfile.close()
freqfile.close()
ifile.close()
| dimenwarper/vic | vic_analysis.py | Python | gpl-2.0 | 2,860 |
import os
from xbmcswift2.logger import log
from xbmcswift2.mockxbmc import utils
def _get_env_setting(name):
return os.getenv('XBMCSWIFT2_%s' % name.upper())
class Addon(object):
def __init__(self, id=None):
# In CLI mode, xbmcswift2 must be run from the root of the addon
# directory, so we can rely on getcwd() being correct.
addonxml = os.path.join(os.getcwd(), 'addon.xml')
id = id or utils.get_addon_id(addonxml)
self._info = {
'id': id,
'name': utils.get_addon_name(addonxml),
'profile': 'special://profile/addon_data/%s/' % id,
'path': 'special://home/addons/%s' % id
}
self._strings = {}
self._settings = {}
def getAddonInfo(self, id):
properties = ['author', 'changelog', 'description', 'disclaimer',
'fanart', 'icon', 'id', 'name', 'path', 'profile', 'stars', 'summary',
'type', 'version']
assert id in properties, '%s is not a valid property.' % id
return self._info.get(id, 'Unavailable')
def getLocalizedString(self, id):
key = str(id)
assert key in self._strings, 'id not found in English/strings.po or strings.xml.'
return self._strings[key]
def getSetting(self, id):
log.warning('xbmcaddon.Addon.getSetting() has not been implemented in '
'CLI mode.')
try:
value = self._settings[id]
except KeyError:
# see if we have an env var
value = _get_env_setting(id)
if _get_env_setting(id) is None:
value = raw_input('* Please enter a temporary value for %s: ' %
id)
self._settings[id] = value
return value
def setSetting(self, id, value):
self._settings[id] = value
def openSettings(self):
pass
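# Minimal usage sketch (illustration only, not part of the original module).
# In CLI mode xbmcswift2 expects to be run from the addon's root directory,
# so an addon.xml must exist in the current working directory for the id and
# name lookups in __init__ to succeed:
#
#   from xbmcswift2.mockxbmc.xbmcaddon import Addon
#   addon = Addon()                       # reads ./addon.xml
#   print(addon.getAddonInfo('name'))     # name parsed from addon.xml
#   addon.setSetting('quality', 'high')
#   print(addon.getSetting('quality'))    # 'high', returned without prompting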
| jbaiter/plugin.video.brmediathek | resources/lib/xbmcswift2/mockxbmc/xbmcaddon.py | Python | gpl-3.0 | 1,905 |
from uwsgiconf.config import Section, configure_uwsgi
uwsgi_configuration = configure_uwsgi(lambda: Section())
| idlesign/uwsgiconf | tests/confs/dummyone.py | Python | bsd-3-clause | 113 |
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="font", parent_name="indicator.title", **kwargs):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/indicator/title/_font.py | Python | mit | 1,514 |
# -*- coding: utf-8 -*-
from IPython.core.display import Javascript, HTML, display_javascript, display_html
def setup_notebook():
# assign text/x-c++src MIME type to pybind11 cells
code = """
require(['notebook/js/codecell'], function(cc) {
cc.CodeCell.options_default.highlight_modes['magic_text/x-c++src'] =
{reg: [/^\s*%%pybind11/]};
});
"""
display_javascript(Javascript(data=code))
# assign non-black colour to C/C++ keywords
html = """
<style>
.cm-s-ipython span.cm-variable-3 {
color: #208ffb;
font-weight: bold;
}
</style>
"""
display_html(HTML(data=html))
| aldanor/ipybind | ipybind/notebook.py | Python | mit | 661 |
"""
You should probably just ignore this ...
"""
import struct
from ppci.wasm.wasm2ppci import create_memories
from ppci.wasm.runtime import create_runtime
from ppci.wasm import Module  # Module is used below to parse the wasm binary
wasm_data = open('rocket.wasm', 'rb').read()
wasm_module = Module(wasm_data)
memories = create_memories(wasm_module)
rt = create_runtime()
# print(memories)
def current_memory():
size = (mem0_end - mem0_start) // (1 << 16)
print('current memory', size)
return size
rt['current_memory'] = current_memory
def grow_memory(delta):
global mem0_end
assert mem0_end == gen_rocket_wasm.heap_top()
print('grow_memory', delta)
gen_rocket_wasm.heap.extend(bytes(delta * 64 * 1024))
mem0_end = gen_rocket_wasm.heap_top()
size = (mem0_end - mem0_start) // (1 << 16)
print('new memory size', size)
return size
# assert mem0_end == len(gen_rocket_wasm.heap) + 0x10000000
rt['grow_memory'] = grow_memory
def get_str(ptr):
""" Get a 0 terminated string """
data = []
while True:
b = gen_rocket_wasm.read_mem(ptr, 1)[0]
if b == 0:
break
else:
data.append(b)
ptr += 1
return bytes(data).decode('ascii')
def trace_func(ptr):
# Lookup name:
print('Trace function entrance:', get_str(ptr))
rt['trace'] = trace_func
print(rt)
import gen_rocket_wasm
# Fixup external functions:
for name, f in rt.items():
# TODO: make a choice between those two options:
gen_rocket_wasm.externals[name] = f
setattr(gen_rocket_wasm, name, f)
# Attach memory:
mem0_start = gen_rocket_wasm.heap_top()
print('memory size:', len(memories[0]), hex(mem0_start))
gen_rocket_wasm.heap.extend(memories[0])
mem0_end = gen_rocket_wasm.heap_top()
wasm_mem0 = gen_rocket_wasm.wasm_mem0_address
gen_rocket_wasm.store_i32(mem0_start, wasm_mem0)
# rocket_wasm.func_pointers[]
gen_rocket_wasm._run_init()
gen_rocket_wasm.draw()
| stonebig/winpython_afterdoc | docs/WASM_almar_klein_demo/play_python.py | Python | mit | 1,902 |
from __future__ import absolute_import
from .array import * # NOQA
from .bounded import * # NOQA
from .citext import * # NOQA
from .encrypted import * # NOQA
from .foreignkey import * # NOQA
from .jsonfield import * # NOQA
from .gzippeddict import * # NOQA
from .node import * # NOQA
from .pickle import * # NOQA
from .uuid import * # NOQA
| mvaled/sentry | src/sentry/db/models/fields/__init__.py | Python | bsd-3-clause | 351 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
from shuup.addons import add_enabled_addons
BASE_DIR = os.getenv("SHUUP_WORKBENCH_BASE_DIR") or (
os.path.dirname(os.path.dirname(__file__)))
SECRET_KEY = "Shhhhh"
DEBUG = True
ALLOWED_HOSTS = []
MEDIA_ROOT = os.path.join(BASE_DIR, "var", "media")
STATIC_ROOT = os.path.join(BASE_DIR, "var", "static")
MEDIA_URL = "/media/"
SHUUP_ENABLED_ADDONS_FILE = os.getenv("SHUUP_ENABLED_ADDONS_FILE") or (
os.path.join(BASE_DIR, "var", "enabled_addons"))
INSTALLED_APPS = add_enabled_addons(SHUUP_ENABLED_ADDONS_FILE, [
# django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
# external apps that needs to be loaded before Shuup
'easy_thumbnails',
# shuup themes
'shuup.themes.classic_gray',
# shuup
'shuup.addons',
'shuup.admin',
'shuup.api',
'shuup.core',
'shuup.default_tax',
'shuup.front',
'shuup.front.apps.auth',
'shuup.front.apps.carousel',
'shuup.front.apps.customer_information',
'shuup.front.apps.personal_order_history',
'shuup.front.apps.saved_carts',
'shuup.front.apps.registration',
'shuup.front.apps.simple_order_notification',
'shuup.front.apps.simple_search',
'shuup.front.apps.recently_viewed_products',
'shuup.notify',
'shuup.simple_cms',
'shuup.customer_group_pricing',
'shuup.campaigns',
'shuup.simple_supplier',
'shuup.order_printouts',
'shuup.testing',
'shuup.utils',
'shuup.xtheme',
'shuup.reports',
'shuup.default_reports',
'shuup.regions',
'shuup.importer',
'shuup.default_importer',
# external apps
'bootstrap3',
'django_countries',
'django_jinja',
'filer',
'registration',
'rest_framework',
])
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'shuup.front.middleware.ProblemMiddleware',
'shuup.front.middleware.ShuupFrontMiddleware',
]
ROOT_URLCONF = 'shuup_workbench.urls'
WSGI_APPLICATION = 'shuup_workbench.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/'
SOUTH_TESTS_MIGRATE = False # Makes tests that much faster.
DEFAULT_FROM_EMAIL = '[email protected]'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
'version': 1,
'formatters': {
'verbose': {'format': '[%(asctime)s] (%(name)s:%(levelname)s): %(message)s'},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'shuup': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': True},
}
}
LANGUAGES = [
('en', 'English'),
('fi', 'Finnish'),
('ja', 'Japanese'),
('zh-hans', 'Simplified Chinese'),
('pt-br', 'Portuguese (Brazil)'),
('it', 'Italian'),
]
PARLER_DEFAULT_LANGUAGE_CODE = "en"
PARLER_LANGUAGES = {
None: [{"code": c, "name": n} for (c, n) in LANGUAGES],
'default': {
'hide_untranslated': False,
}
}
_TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages"
]
TEMPLATES = [
{
"BACKEND": "django_jinja.backend.Jinja2",
"APP_DIRS": True,
"OPTIONS": {
"match_extension": ".jinja",
"context_processors": _TEMPLATE_CONTEXT_PROCESSORS,
"newstyle_gettext": True,
"environment": "shuup.xtheme.engine.XthemeEnvironment",
},
"NAME": "jinja2",
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": _TEMPLATE_CONTEXT_PROCESSORS,
"debug": DEBUG
}
},
]
# set login url here because of `login_required` decorators
LOGIN_URL = "/login/"
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
SHUUP_PRICING_MODULE = "customer_group_pricing"
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAdminUser',
)
}
SHUUP_SETUP_WIZARD_PANE_SPEC = [
"shuup.admin.modules.shops.views:ShopWizardPane",
"shuup.admin.modules.service_providers.views.PaymentWizardPane",
"shuup.admin.modules.service_providers.views.CarrierWizardPane",
"shuup.xtheme.admin_module.views.ThemeWizardPane"
]
if os.environ.get("SHUUP_WORKBENCH_DISABLE_MIGRATIONS") == "1":
from .utils import DisableMigrations
MIGRATION_MODULES = DisableMigrations()
def configure(setup):
setup.commit(globals())
| suutari/shoop | shuup_workbench/settings/base_settings.py | Python | agpl-3.0 | 6,077 |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import BitcoinTestFramework
class CreateCache(BitcoinTestFramework):
# Test network and test nodes are not required:
def set_test_params(self):
self.num_nodes = 0
self.supports_cli = True
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()
| ericshawlinux/bitcoin | test/functional/create_cache.py | Python | mit | 826 |
#!/usr/bin/python
#
# Convert an 84x48 image into a bitmap suitable for use on SkyDrop.
# The image must be in an arbitrary format understood by ImageMagick and
# in b/w format. Anything in "white" will be transparent and any other
# color will be "black".
#
# If you only give a single input file (and no output file), then the
# image will be dumped in C source format for use by disp.DrawImage.
#
# Copyright 2017 by Dr. Tilmann Bubeck <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
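# Example invocations (the image file names are placeholders, not from this
# repository):
#
#   ./convert-image-to-LOGO.py splash.png          # print a C byte array
#   ./convert-image-to-LOGO.py splash.png LOGO     # write an 84x48 LOGO file
#
# The one-argument form accepts any ImageMagick-readable b/w image whose height
# is a multiple of 8; the two-argument form additionally enforces the exact
# 84x48 size checked further below.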
import sys
import subprocess
def identify(filename, formatFlag):
return subprocess.check_output(["identify", "-format", formatFlag, filename])
def getWidth(filename):
return int(identify(filename, "%w"))
def getHeight(filename):
return int(identify(filename, "%h"))
def getPixel(x, y):
output = subprocess.check_output(["convert", sys.argv[1] + "[1x1+" + str(x) + "+" + str(y) + "]", "txt:"])
if "#FFFFFF" in output:
return 0
else:
return 1
def putPixel(x, y, color):
index = ((y / 8) * width) + (x % width)
if color != 0:
img[index] |= (1 << (y % 8));
else:
img[index] &= ~(1 << (y % 8));
# Check usage:
if len(sys.argv) != 2 and len(sys.argv) != 3:
print "usage: convert-image-to-LOGO.py input-file-of-logo [output]"
print " convert the image into a LOGO file or to a byte array if no output is given."
sys.exit(1)
filename = sys.argv[1]
# These are the dimensions of the image (and the LCD):
width = getWidth(filename)
height = getHeight(filename)
if height % 8 != 0:
print "Height must be multiple of 8!"
sys.exit(1)
# Create a memory array which holds the data and is organized as the LCD memory:
img = []
for i in range(width * height / 8):
img.append(0)
# Fill memory array
for y in range(height):
for x in range(width):
putPixel(x, y, getPixel(x, y))
if len(sys.argv) == 2:
# No output file given, print as byte array
print str(width) + ", " + str(height) + "," + " // width, heigth"
num = 0
for i in img:
print ("0x%0.2X" % i) + "," ,
num = num + 1
if num % 8 == 0:
print
else:
if width != 84 or height != 48:
print "LOGO must be 84x48 pixel."
sys.exit(1)
# Convert into byte array:
img = map(chr, img)
img = "".join(img)
# Write out byte array
f = open(sys.argv[2], "wb");
f.write(img)
f.close()
| dpsfotocestou/SkyDrop | skydrop/utils/img/convert-image-to-LOGO.py | Python | gpl-2.0 | 3,061 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# filename: tops_sql.py
# Copyright 2008-2010 Stefano Costa <[email protected]>
#
# This file is part of Total Open Station.
#
# Total Open Station is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Total Open Station is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Total Open Station. If not, see
# <http://www.gnu.org/licenses/>.
def to_sql(point, tablename):
'''Generate SQL line corresponding to the input point.
At this moment the column names are fixed, but they could change in the
future. The default names are reasonable.'''
params = {
'wkt': to_wkt(point),
'tablename': tablename,
'pid': point[0],
'text': point[4]}
sql_string = "INSERT INTO %(tablename)s" % params
sql_string += "(point_id, point_geom, point_text) VALUES"
sql_string += "(%(pid)s,GeomFromText('%(wkt)s'),'%(text)s');\n" % params
return sql_string
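# Worked example (values invented for illustration):
#   to_sql((1, 2.5, 3.5, 0.0, 'base'), 'topsdata')
# returns the string
#   "INSERT INTO topsdata(point_id, point_geom, point_text) VALUES(1,GeomFromText('POINT(2.5 3.5)'),'base');\n"
# Note that the z value is not part of the geometry, because to_wkt() below
# builds 2D POINT representations.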
def to_wkt(point):
pid, x, y, z, text = point
wkt_representation = 'POINT(%s %s)' % (x, y)
return wkt_representation
class OutputFormat:
"""
Exports points data in SQL format suitable for use with PostGIS & friends.
http://postgis.refractions.net/documentation/manual-1.3/ch04.html#id2986280
has an example of loading an SQL file into a PostgreSQL database.
``data`` should be an iterable (e.g. list) containing one iterable (e.g.
    tuple) for each point. The default order is PID, x, y, z, TEXT.
This is consistent with our current standard.
"""
def __init__(self, data, tablename='topsdata'):
self.data = data
self.tablename = tablename
def process(self):
lines = [to_sql(e, self.tablename) for e in self.data]
lines.insert(0, 'BEGIN;\n')
lines.append('COMMIT;\n')
output = "".join(lines)
return output
if __name__ == "__main__":
    OutputFormat(
[(1, 2, 3, 4, 'qwerty'),
("2.3", 42, 45, 12, 'asdfg')],
'prova')
| BackupTheBerlios/tops | totalopenstation/output/tops_sql.py | Python | gpl-3.0 | 2,442 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2019 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from Cerebrum.rest.api import create_app
app = create_app('restconfig')
def main(inargs=None):
import argparse
import Cerebrum.logutils
import Cerebrum.logutils.options
parser = argparse.ArgumentParser(
description="Start flask dev server",
)
bind_opts = parser.add_argument_group('bind options')
bind_opts.add_argument(
'--host',
default=app.config['HOST'],
help='Listen on interface %(metavar)s (%(default)s)',
metavar='<host>',
)
bind_opts.add_argument(
'--port',
type=int,
default=app.config['PORT'],
help='Listen on port %(metavar)s (%(default)s)',
metavar='<port>',
)
debug_opts = parser.add_argument_group('debug options')
debug_mutex = debug_opts.add_mutually_exclusive_group()
debug_default = app.config['DEBUG']
debug_mutex.add_argument(
'--debug',
dest='debug',
action='store_true',
help='Enable debug mode' + (' (default)' if debug_default else ''),
)
debug_mutex.add_argument(
'--no-debug',
dest='debug',
action='store_false',
help='Disable debug mode' + ('' if debug_default else ' (default)'),
)
debug_mutex.set_defaults(debug=debug_default)
Cerebrum.logutils.options.install_subparser(parser)
args = parser.parse_args(inargs)
Cerebrum.logutils.autoconf('console', args)
# Fix flask logging
app.logger.propagate = True
for handler in app.logger.handlers[:]:
app.logger.removeHandler(handler)
app.run(
host=args.host,
port=args.port,
debug=args.debug,
)
if __name__ == '__main__':
main()
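# Invocation sketch (illustrative values; it assumes a 'restconfig' module is
# importable, as required by create_app('restconfig') above). Only flags
# defined by the argparse setup are shown:
#
#   python server.py --host 127.0.0.1 --port 8080 --debug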
| unioslo/cerebrum | Cerebrum/rest/server.py | Python | gpl-2.0 | 2,520 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import namedtuple
from datetime import datetime
from operator import attrgetter
from dateutil.relativedelta import relativedelta
from flask import flash, jsonify, redirect, request, session
from markupsafe import Markup, escape
from sqlalchemy.orm import joinedload, load_only, subqueryload, undefer
from sqlalchemy.orm.exc import StaleDataError
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from indico.core import signals
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.core.notifications import make_email, send_email
from indico.legacy.common.cache import GenericCache
from indico.legacy.common.mail import GenericMailer
from indico.modules.admin import RHAdminBase
from indico.modules.auth import Identity
from indico.modules.auth.models.registration_requests import RegistrationRequest
from indico.modules.auth.util import register_user
from indico.modules.categories import Category
from indico.modules.events import Event
from indico.modules.users import User, logger, user_management_settings
from indico.modules.users.forms import (AdminAccountRegistrationForm, AdminsForm, AdminUserSettingsForm, MergeForm,
SearchForm, UserDetailsForm, UserEmailsForm, UserPreferencesForm)
from indico.modules.users.models.emails import UserEmail
from indico.modules.users.operations import create_user
from indico.modules.users.util import (get_linked_events, get_related_categories, get_suggested_categories, merge_users,
search_users, serialize_user)
from indico.modules.users.views import WPUser, WPUsersAdmin
from indico.util.date_time import now_utc, timedelta_split
from indico.util.event import truncate_path
from indico.util.i18n import _
from indico.util.signals import values_from_signal
from indico.util.string import make_unique_token
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.rh import RHProtected
from indico.web.util import jsonify_data, jsonify_form, jsonify_template
IDENTITY_ATTRIBUTES = {'first_name', 'last_name', 'email', 'affiliation', 'full_name'}
UserEntry = namedtuple('UserEntry', IDENTITY_ATTRIBUTES | {'profile_url', 'user'})
class RHUserBase(RHProtected):
flash_user_status = True
allow_system_user = False
def _process_args(self):
if not session.user:
return
self.user = session.user
if 'user_id' in request.view_args:
self.user = User.get(request.view_args['user_id'])
if self.user is None:
raise NotFound('This user does not exist')
elif request.method == 'GET' and not request.is_xhr and self.flash_user_status:
# Show messages about the user's status if it's a simple GET request
if self.user.is_deleted:
if self.user.merged_into_id is not None:
msg = _('This user has been merged into <a href="{url}">another user</a>.')
flash(Markup(msg).format(url=url_for(request.endpoint, self.user.merged_into_user)), 'warning')
else:
flash(_('This user is marked as deleted.'), 'warning')
if self.user.is_pending:
flash(_('This user is marked as pending, i.e. it has been attached to something but never '
'logged in.'), 'warning')
if not self.allow_system_user and self.user.is_system:
return redirect(url_for('users.user_profile'))
def _check_access(self):
RHProtected._check_access(self)
if not self.user.can_be_modified(session.user):
raise Forbidden('You cannot modify this user.')
class RHUserDashboard(RHUserBase):
management_roles = {'conference_creator', 'conference_chair', 'conference_manager', 'session_manager',
'session_coordinator', 'contribution_manager'}
reviewer_roles = {'paper_manager', 'paper_judge', 'paper_content_reviewer', 'paper_layout_reviewer',
'contribution_referee', 'contribution_editor', 'contribution_reviewer', 'abstract_reviewer',
'track_convener'}
attendance_roles = {'contributor', 'contribution_submission', 'abstract_submitter', 'abstract_person',
'registration_registrant', 'survey_submitter', 'lecture_speaker'}
def _process(self):
self.user.settings.set('suggest_categories', True)
tz = session.tzinfo
hours, minutes = timedelta_split(tz.utcoffset(datetime.now()))[:2]
categories = get_related_categories(self.user)
categories_events = []
if categories:
category_ids = {c['categ'].id for c in categories.itervalues()}
today = now_utc(False).astimezone(tz).date()
query = (Event.query
.filter(~Event.is_deleted,
Event.category_chain_overlaps(category_ids),
Event.start_dt.astimezone(session.tzinfo) >= today)
.options(joinedload('category').load_only('id', 'title'),
joinedload('series'),
subqueryload('acl_entries'),
load_only('id', 'category_id', 'start_dt', 'end_dt', 'title', 'access_key',
'protection_mode', 'series_id', 'series_pos', 'series_count'))
.order_by(Event.start_dt, Event.id))
categories_events = get_n_matching(query, 10, lambda x: x.can_access(self.user))
from_dt = now_utc(False) - relativedelta(weeks=1, hour=0, minute=0, second=0)
linked_events = [(event, {'management': bool(roles & self.management_roles),
'reviewing': bool(roles & self.reviewer_roles),
'attendance': bool(roles & self.attendance_roles)})
for event, roles in get_linked_events(self.user, from_dt, 10).iteritems()]
return WPUser.render_template('dashboard.html', 'dashboard',
offset='{:+03d}:{:02d}'.format(hours, minutes), user=self.user,
categories=categories,
categories_events=categories_events,
suggested_categories=get_suggested_categories(self.user),
linked_events=linked_events)
class RHPersonalData(RHUserBase):
allow_system_user = True
def _process(self):
form = UserDetailsForm(obj=FormDefaults(self.user, skip_attrs={'title'}, title=self.user._title),
synced_fields=self.user.synced_fields, synced_values=self.user.synced_values)
if form.validate_on_submit():
self.user.synced_fields = form.synced_fields
form.populate_obj(self.user, skip=self.user.synced_fields)
self.user.synchronize_data(refresh=True)
flash(_('Your personal data was successfully updated.'), 'success')
return redirect(url_for('.user_profile'))
return WPUser.render_template('personal_data.html', 'personal_data', user=self.user, form=form)
class RHUserPreferences(RHUserBase):
def _process(self):
extra_preferences = [pref(self.user) for pref in values_from_signal(signals.users.preferences.send(self.user))]
form_class = UserPreferencesForm
defaults = FormDefaults(**self.user.settings.get_all(self.user))
for pref in extra_preferences:
form_class = pref.extend_form(form_class)
pref.extend_defaults(defaults)
form = form_class(obj=defaults)
if form.validate_on_submit():
data = form.data
for pref in extra_preferences:
pref.process_form_data(data)
self.user.settings.set_multi(data)
session.lang = self.user.settings.get('lang')
session.timezone = (self.user.settings.get('timezone') if self.user.settings.get('force_timezone')
else 'LOCAL')
flash(_('Preferences saved'), 'success')
return redirect(url_for('.user_preferences'))
return WPUser.render_template('preferences.html', 'preferences', user=self.user, form=form)
class RHUserFavorites(RHUserBase):
def _process(self):
query = (Category.query
.filter(Category.id.in_(c.id for c in self.user.favorite_categories))
.options(undefer('chain_titles')))
categories = sorted([(cat, truncate_path(cat.chain_titles[:-1], chars=50)) for cat in query],
key=lambda c: (c[0].title, c[1]))
return WPUser.render_template('favorites.html', 'favorites', user=self.user, favorite_categories=categories)
class RHUserFavoritesUsersAdd(RHUserBase):
def _process(self):
users = [User.get(int(id_)) for id_ in request.form.getlist('user_id')]
self.user.favorite_users |= set(filter(None, users))
tpl = get_template_module('users/_favorites.html')
return jsonify(success=True, users=[serialize_user(user) for user in users],
html=tpl.favorite_users_list(self.user))
class RHUserFavoritesUserRemove(RHUserBase):
def _process(self):
user = User.get(int(request.view_args['fav_user_id']))
self.user.favorite_users.discard(user)
try:
db.session.flush()
except StaleDataError:
# Deleted in another transaction
db.session.rollback()
return jsonify(success=True)
class RHUserFavoritesCategoryAPI(RHUserBase):
def _process_args(self):
RHUserBase._process_args(self)
self.category = Category.get_one(request.view_args['category_id'])
self.suggestion = self.user.suggested_categories.filter_by(category=self.category).first()
def _process_PUT(self):
if self.category not in self.user.favorite_categories:
if not self.category.can_access(self.user):
raise Forbidden()
self.user.favorite_categories.add(self.category)
if self.suggestion:
self.user.suggested_categories.remove(self.suggestion)
return jsonify(success=True)
def _process_DELETE(self):
if self.category in self.user.favorite_categories:
self.user.favorite_categories.discard(self.category)
try:
db.session.flush()
except StaleDataError:
# Deleted in another transaction
db.session.rollback()
suggestion = self.user.suggested_categories.filter_by(category=self.category).first()
if suggestion:
self.user.suggested_categories.remove(suggestion)
return jsonify(success=True)
class RHUserSuggestionsRemove(RHUserBase):
def _process(self):
suggestion = self.user.suggested_categories.filter_by(category_id=request.view_args['category_id']).first()
if suggestion:
suggestion.is_ignored = True
return jsonify(success=True)
class RHUserEmails(RHUserBase):
def _send_confirmation(self, email):
token_storage = GenericCache('confirm-email')
data = {'email': email, 'user_id': self.user.id}
token = make_unique_token(lambda t: not token_storage.get(t))
token_storage.set(token, data, 24 * 3600)
GenericMailer.send(make_email(email, template=get_template_module('users/emails/verify_email.txt',
user=self.user, email=email, token=token)))
def _process(self):
form = UserEmailsForm()
if form.validate_on_submit():
self._send_confirmation(form.email.data)
flash(_("We have sent an email to {email}. Please click the link in that email within 24 hours to "
"confirm your new email address.").format(email=form.email.data), 'success')
return redirect(url_for('.user_emails'))
return WPUser.render_template('emails.html', 'emails', user=self.user, form=form)
class RHUserEmailsVerify(RHUserBase):
flash_user_status = False
token_storage = GenericCache('confirm-email')
def _validate(self, data):
if not data:
flash(_('The verification token is invalid or expired.'), 'error')
return False, None
user = User.get(data['user_id'])
if not user or user != self.user:
flash(_('This token is for a different Indico user. Please login with the correct account'), 'error')
return False, None
existing = UserEmail.find_first(is_user_deleted=False, email=data['email'])
if existing and not existing.user.is_pending:
if existing.user == self.user:
flash(_('This email address is already attached to your account.'))
else:
flash(_('This email address is already in use by another account.'), 'error')
return False, existing.user
return True, existing.user if existing else None
def _process(self):
token = request.view_args['token']
data = self.token_storage.get(token)
valid, existing = self._validate(data)
if valid:
self.token_storage.delete(token)
if existing and existing.is_pending:
logger.info("Found pending user %s to be merged into %s", existing, self.user)
# If the pending user has missing names, copy them from the active one
# to allow it to be marked as not pending and deleted during the merge.
existing.first_name = existing.first_name or self.user.first_name
existing.last_name = existing.last_name or self.user.last_name
merge_users(existing, self.user)
flash(_("Merged data from existing '{}' identity").format(existing.email))
existing.is_pending = False
self.user.secondary_emails.add(data['email'])
signals.users.email_added.send(self.user, email=data['email'])
flash(_('The email address {email} has been added to your account.').format(email=data['email']), 'success')
return redirect(url_for('.user_emails'))
class RHUserEmailsDelete(RHUserBase):
def _process(self):
email = request.view_args['email']
if email in self.user.secondary_emails:
self.user.secondary_emails.remove(email)
return jsonify(success=True)
class RHUserEmailsSetPrimary(RHUserBase):
def _process(self):
email = request.form['email']
if email in self.user.secondary_emails:
self.user.make_email_primary(email)
flash(_('Your primary email was updated successfully.'), 'success')
return redirect(url_for('.user_emails'))
class RHAdmins(RHAdminBase):
"""Show Indico administrators"""
def _process(self):
admins = set(User.query
.filter_by(is_admin=True, is_deleted=False)
.order_by(db.func.lower(User.first_name), db.func.lower(User.last_name)))
form = AdminsForm(admins=admins)
if form.validate_on_submit():
added = form.admins.data - admins
removed = admins - form.admins.data
for user in added:
user.is_admin = True
logger.warn('Admin rights granted to %r by %r [%s]', user, session.user, request.remote_addr)
flash(_('Admin added: {name} ({email})').format(name=user.name, email=user.email), 'success')
for user in removed:
user.is_admin = False
logger.warn('Admin rights revoked from %r by %r [%s]', user, session.user, request.remote_addr)
flash(_('Admin removed: {name} ({email})').format(name=user.name, email=user.email), 'success')
return redirect(url_for('.admins'))
return WPUsersAdmin.render_template('admins.html', 'admins', form=form)
class RHUsersAdmin(RHAdminBase):
"""Admin users overview"""
def _process(self):
form = SearchForm(obj=FormDefaults(exact=True))
form_data = form.data
search_results = None
num_of_users = User.query.count()
num_deleted_users = User.find(is_deleted=True).count()
if form.validate_on_submit():
search_results = []
exact = form_data.pop('exact')
include_deleted = form_data.pop('include_deleted')
include_pending = form_data.pop('include_pending')
external = form_data.pop('external')
form_data = {k: v for (k, v) in form_data.iteritems() if v and v.strip()}
matches = search_users(exact=exact, include_deleted=include_deleted, include_pending=include_pending,
external=external, allow_system_user=True, **form_data)
for entry in matches:
if isinstance(entry, User):
search_results.append(UserEntry(
profile_url=url_for('.user_profile', entry),
user=entry,
**{k: getattr(entry, k) for k in IDENTITY_ATTRIBUTES}
))
else:
search_results.append(UserEntry(
profile_url=None,
user=None,
full_name="{first_name} {last_name}".format(**entry.data.to_dict()),
**{k: entry.data.get(k) for k in (IDENTITY_ATTRIBUTES - {'full_name'})}
))
search_results.sort(key=attrgetter('first_name', 'last_name'))
num_reg_requests = RegistrationRequest.query.count()
return WPUsersAdmin.render_template('users_admin.html', 'users', form=form, search_results=search_results,
num_of_users=num_of_users, num_deleted_users=num_deleted_users,
num_reg_requests=num_reg_requests)
class RHUsersAdminSettings(RHAdminBase):
"""Manage global suer-related settings."""
def _process(self):
form = AdminUserSettingsForm(obj=FormDefaults(**user_management_settings.get_all()))
if form.validate_on_submit():
user_management_settings.set_multi(form.data)
return jsonify_data(flash=False)
return jsonify_form(form)
class RHUsersAdminCreate(RHAdminBase):
"""Create user (admin)"""
def _process(self):
form = AdminAccountRegistrationForm()
if form.validate_on_submit():
data = form.data
if data.pop('create_identity', False):
identity = Identity(provider='indico', identifier=data.pop('username'), password=data.pop('password'))
else:
identity = None
data.pop('username', None)
data.pop('password', None)
user = create_user(data.pop('email'), data, identity, from_moderation=True)
msg = Markup('{} <a href="{}">{}</a>').format(
escape(_('The account has been created.')),
url_for('users.user_profile', user),
escape(_('Show details'))
)
flash(msg, 'success')
return jsonify_data()
return jsonify_template('users/users_admin_create.html', form=form)
def _get_merge_problems(source, target):
errors = []
warnings = []
if source == target:
errors.append(_("Users are the same!"))
if (source.first_name.strip().lower() != target.first_name.strip().lower() or
source.last_name.strip().lower() != target.last_name.strip().lower()):
warnings.append(_("Users' names seem to be different!"))
if source.is_pending:
warnings.append(_("Source user has never logged in to Indico!"))
if target.is_pending:
warnings.append(_("Target user has never logged in to Indico!"))
if source.is_deleted:
errors.append(_("Source user has been deleted!"))
if target.is_deleted:
errors.append(_("Target user has been deleted!"))
if source.is_admin:
warnings.append(_("Source user is an administrator!"))
if target.is_admin:
warnings.append(_("Target user is an administrator!"))
if source.is_admin and not target.is_admin:
errors.append(_("Source user is an administrator but target user isn't!"))
return errors, warnings
class RHUsersAdminMerge(RHAdminBase):
"""Merge users (admin)"""
def _process(self):
form = MergeForm()
if form.validate_on_submit():
source = form.source_user.data
target = form.target_user.data
errors, warnings = _get_merge_problems(source, target)
if errors:
raise BadRequest(_('Merge aborted due to failed sanity check'))
if warnings:
logger.info("User %s initiated merge of %s into %s (with %d warnings)",
session.user, source, target, len(warnings))
else:
logger.info("User %s initiated merge of %s into %s", session.user, source, target)
merge_users(source, target)
flash(_('The users have been successfully merged.'), 'success')
return redirect(url_for('.user_profile', user_id=target.id))
return WPUsersAdmin.render_template('users_merge.html', 'users', form=form)
class RHUsersAdminMergeCheck(RHAdminBase):
def _process(self):
source = User.get_one(request.args['source'])
target = User.get_one(request.args['target'])
errors, warnings = _get_merge_problems(source, target)
return jsonify(errors=errors, warnings=warnings)
class RHRegistrationRequestList(RHAdminBase):
"""List all registration requests"""
def _process(self):
requests = RegistrationRequest.query.order_by(RegistrationRequest.email).all()
return WPUsersAdmin.render_template('registration_requests.html', 'users', pending_requests=requests)
class RHRegistrationRequestBase(RHAdminBase):
"""Base class to process a registration request"""
def _process_args(self):
RHAdminBase._process_args(self)
self.request = RegistrationRequest.get_one(request.view_args['request_id'])
class RHAcceptRegistrationRequest(RHRegistrationRequestBase):
"""Accept a registration request"""
def _process(self):
user, identity = register_user(self.request.email, self.request.extra_emails, self.request.user_data,
self.request.identity_data, self.request.settings)
tpl = get_template_module('users/emails/registration_request_accepted.txt', user=user)
send_email(make_email(self.request.email, template=tpl))
flash(_('The request has been approved.'), 'success')
return jsonify_data()
class RHRejectRegistrationRequest(RHRegistrationRequestBase):
"""Reject a registration request"""
def _process(self):
db.session.delete(self.request)
tpl = get_template_module('users/emails/registration_request_rejected.txt', req=self.request)
send_email(make_email(self.request.email, template=tpl))
flash(_('The request has been rejected.'), 'success')
return jsonify_data()
| eliasdesousa/indico | indico/modules/users/controllers.py | Python | gpl-3.0 | 24,412 |
#!/usr/bin/python3
#
# CherryMusic - a standalone music server
# Copyright (c) 2012 - 2014 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
import os
import tempfile
from nose.tools import *
import audiotranscode as transcode
transcoder = transcode.AudioTranscode(debug=True)
testdir = os.path.dirname(__file__)
testfiles = {
'mp3' : os.path.join(testdir,'test.mp3'),
'ogg' : os.path.join(testdir,'test.ogg'),
'flac': os.path.join(testdir,'test.flac'),
'wav': os.path.join(testdir,'test.wav'),
'm4a': os.path.join(testdir,'test.m4a'),
'wma': os.path.join(testdir,'test.wma'),
}
outputpath = tempfile.mkdtemp(prefix='test.audiotranscode.output.')
def generictestfunc(filepath, newformat, encoder, decoder):
ident = "%s_%s_to_%s_%s" % (
decoder.command[0],
os.path.basename(filepath),
encoder.command[0],
newformat
)
outdata = b''
for data in transcoder.transcode_stream(filepath, newformat, encoder=encoder, decoder=decoder):
outdata += data
ok_(len(outdata)>0, 'No data received: '+ident)
with open(os.path.join(outputpath,ident+'.'+newformat),'wb') as outfile:
outfile.write(outdata)
def test_generator():
for enc in transcoder.Encoders:
if not enc.available():
print('Encoder %s not installed!'%enc.command[0])
continue
for dec in transcoder.Decoders:
if not dec.available():
                print('Decoder %s not installed!' % dec.command[0])
continue
if dec.filetype in testfiles:
yield generictestfunc, testfiles[dec.filetype], enc.filetype, enc, dec
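# These generator-style tests rely on nose (note the nose.tools import above);
# a typical run would be something along the lines of
#   nosetests test_transcode.py
# which exercises every available encoder/decoder pair and leaves the results
# in the temporary output directory created above.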
| andpp/cherrymusic | audiotranscode/test/test_transcode.py | Python | gpl-3.0 | 2,593 |
"""A variety of useful functions to keep Python scripts powerful while short and sweet.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/borgesp/pypi-pmb
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
#from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pmb',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.13',
description="A variety of useful functions to keep Python scripts powerful while short and sweet",
long_description=long_description,
# The project's main homepage.
url='https://github.com/borgesp/pypi-pmb',
# Author details
author='Pedro Borges',
author_email='[email protected]',
# Choose your license
license='GPLv3+',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='Utils Shell AWS',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
#install_requires=['subprocess'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
) | borgesp/pypi-pmb | setup.py | Python | gpl-3.0 | 4,557 |
import os, uuid
def generate(filename, depth=2, base=512):
if os.path.sep in filename:
path, filename = os.path.split(filename)
else:
path = None
dummyhash = sum(ord(c)*256**(i % 4) for i,c in enumerate(filename)) % base**depth
folders = []
for level in range(depth-1,-1,-1):
code, dummyhash = divmod(dummyhash, base**level)
folders.append("%03x" % code)
folders.append(filename)
if path:
folders.insert(0,path)
return os.path.join(*folders)
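# Illustration of the resulting layout (the hash digits are indicative, not
# authoritative): with the defaults depth=2 and base=512 a bare name is fanned
# out over two 3-hex-digit directories, e.g.
#   generate("foo.txt")        -> "1f3/1da/foo.txt"
#   generate("data/foo.txt")   -> "data/1f3/1da/foo.txt"
# so a flat collection of files is spread over at most 512*512 buckets.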
def exists(filename, path=None):
if os.path.exists(filename):
return True
if path is None:
path, filename = os.path.split(filename)
fullfilename = os.path.join(path, generate(filename))
if os.path.exists(fullfilename):
return True
return False
def remove(filename, path=None):
if os.path.exists(filename):
return os.unlink(filename)
if path is None:
path, filename = os.path.split(filename)
fullfilename = os.path.join(path, generate(filename))
if os.path.exists(fullfilename):
return os.unlink(fullfilename)
raise IOError
def open(filename, mode="r", path=None):
if not path:
path, filename = os.path.split(filename)
fullfilename = None
if not mode.startswith('w'):
fullfilename = os.path.join(path, filename)
if not os.path.exists(fullfilename):
fullfilename = None
if not fullfilename:
fullfilename = os.path.join(path, generate(filename))
if mode.startswith('w') and not os.path.exists(os.path.dirname(fullfilename)):
os.makedirs(os.path.dirname(fullfilename))
return file(fullfilename, mode)
def test():
if not os.path.exists('tests'):
os.mkdir('tests')
for k in range(20):
filename = os.path.join('tests',str(uuid.uuid4())+'.test')
open(filename, "w").write('test')
assert open(filename, "r").read()=='test'
if exists(filename):
remove(filename)
if __name__ == '__main__':
test()
| Titosoft/ferry-boat | web2py/gluon/recfile.py | Python | mit | 2,105 |
from ase import Atoms
from ase.constraints import FixLinearTriatomic
from ase.calculators.acn import (ACN, m_me,
r_mec, r_cn)
from ase.md import Langevin
import ase.units as units
from ase.io import Trajectory
import numpy as np
pos = [[0, 0, -r_mec],
[0, 0, 0],
[0, 0, r_cn]]
atoms = Atoms('CCN', positions=pos)
atoms.rotate(30, 'x')
# First C of each molecule needs to have the mass of a methyl group
masses = atoms.get_masses()
masses[::3] = m_me
atoms.set_masses(masses)
# Determine side length of a box with the density of acetonitrile at 298 K
d = 0.776 / 1e24 # Density in g/Ang3 (https://pubs.acs.org/doi/10.1021/je00001a006)
L = ((masses.sum() / units.mol) / d)**(1 / 3.)
# Set up box of 27 acetonitrile molecules
atoms.set_cell((L, L, L))
atoms.center()
atoms = atoms.repeat((3, 3, 3))
atoms.set_pbc(True)
# Set constraints for rigid triatomic molecules
nm = 27
atoms.constraints = FixLinearTriatomic(
triples=[(3 * i, 3 * i + 1, 3 * i + 2)
for i in range(nm)])
tag = 'acn_27mol_300K'
atoms.calc = ACN(rc=np.min(np.diag(atoms.cell))/2)
# Create Langevin object
md = Langevin(atoms, 1 * units.fs,
temperature=300 * units.kB,
friction=0.01,
logfile=tag + '.log')
traj = Trajectory(tag + '.traj', 'w', atoms)
md.attach(traj.write, interval=1)
md.run(5000)
# Repeat box and equilibrate further
atoms.set_constraint()
atoms = atoms.repeat((2, 2, 2))
nm = 216
atoms.constraints = FixLinearTriatomic(
triples=[(3 * i, 3 * i + 1, 3 * i + 2)
for i in range(nm)])
tag = 'acn_216mol_300K'
atoms.calc = ACN(rc=np.min(np.diag(atoms.cell))/2)
# Create Langevin object
md = Langevin(atoms, 2 * units.fs,
temperature=300 * units.kB,
friction=0.01,
logfile=tag + '.log')
traj = Trajectory(tag + '.traj', 'w', atoms)
md.attach(traj.write, interval=1)
md.run(3000)
| miroi/open-collection | theoretical_chemistry/software_runs/ase/runs/acn_equil/acn_equil.py | Python | mit | 2,006 |
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
# vim:fenc=utf-8:
from mole.event import Event
from mole.action import Action, ActionSyntaxError
from mole.parser import Parser
class ActionParser(Parser):
"""This action parse the pipeline using specific parser
    :param `name`: a list containing the string representation of the parser to be loaded (only a single element is allowed).
"""
def __init__(self, name,*args, **kwargs):
if len(name) > 1:
            raise ActionSyntaxError("parser command only allows a single parameter")
self.item = None
self.name = name[0]
def _init_item(self):
if self.item is None:
if self.context:
self.item = self.context[self.name].parser
else:
self.item = []
def __iter__(self):
return iter(self.item)
def __call__(self, pipeline):
self._init_item()
return self.item(pipeline)
def get_object(self):
return self.context[self.name]
| ajdiaz/mole | mole/action/parser.py | Python | gpl-3.0 | 983 |
# encoding: UTF-8
from vnpy.trader import vtConstant
from .tkproGateway import TkproGateway
gatewayClass = TkproGateway
gatewayName = 'TKPRO'
gatewayDisplayName = 'TKPRO'
gatewayType = vtConstant.GATEWAYTYPE_EQUITY
gatewayQryEnabled = True
| harveywwu/vnpy | vnpy/trader/gateway/tkproGateway/__init__.py | Python | mit | 242 |
"""This module contains various functions usued internally to process colours,
mainly for validation pruposes currently."""
import colorsys
def process_color(color):
"""Raises exceptions if a colour does not meet requirements.
:param str color: The colour to check.
:raises TypeError: if the colour is not a string.
:raises ValueError: if the colour is not valid."""
if not isinstance(color, str):
raise TypeError("Color must be str, not '%s'" % color)
elif not is_valid_color(color):
raise ValueError("'%s' is not a valid color" % color)
else:
return color.upper()
def is_valid_color(color):
"""Checks that a given string represents a valid hex colour.
:param str color: The color to check.
:rtype: ``bool``"""
if not color:
return False
if color[0] != "#":
return False
if len(color) != 7:
return False
for char in color[1:]:
if char.upper() not in "0123456789ABCDEF":
return False
return True
def hsl_to_rgb(hue, saturation, lightness):
"""Takes a colour in HSL format and produces an RGB string in the form
#RRGGBB.
:param hue: The Hue value (between 0 and 360).
:param saturation: The Saturation value (between 0 and 100).
:param lightness: The Lightness value (between 0 and 100).
:raises ValueError: if any of the three parameters are outside their \
bounds."""
if not isinstance(hue, int) and not isinstance(hue, float):
raise TypeError("hue must be numeric, not '%s'" % hue)
if not isinstance(saturation, int) and not isinstance(saturation, float):
raise TypeError("saturation must be numeric, not '%s'" % saturation)
if not isinstance(lightness, int) and not isinstance(lightness, float):
raise TypeError("lightness must be numeric, not '%s'" % lightness)
if not (0 <= hue <= 360):
raise ValueError("hue must be between 0 and 360, not '%s'" % str(hue))
if not (0 <= saturation <= 100):
raise ValueError(
"saturation must be between 0 and 100, not '%s'" % str(saturation)
)
if not (0 <= lightness <= 100):
raise ValueError(
"lightness must be between 0 and 100, not '%s'" % str(lightness)
)
r, g, b = colorsys.hls_to_rgb(hue / 360, lightness / 100, saturation / 100)
return ("#%02x%02x%02x" % (int(r * 255), int(g * 255), int(b * 255))).upper()
RED = "#EE4035"
BLUE = "#0392CF"
GREEN = "#7BC043"
YELLOW = "#FEFFA3"
ORANGE = "#FF7736"
PURPLE = "#9E379F"
PINK = "#FFC5D9"
BROWN = "#6B3E26"
PALEBLUE = "#AADDFF"
PALEGREEN = "#B3E784"
BLACK = "#3D332B"
colors = [
RED, BLUE, GREEN, YELLOW, ORANGE, PURPLE, PINK, BROWN, PALEBLUE, PALEGREEN,
BLACK
]
"""Color constants"""
| samirelanduk/omnicanvas | omnicanvas/color.py | Python | mit | 2,764 |
import pytest
from tests.utils.loop import CounterLoop
@pytest.fixture(autouse=True)
def _(module_launcher_launch):
pass
def test_move_from_rep1(module_launcher):
module_launcher.create_file('default', 'to_move1')
module_launcher.move_file('default', 'to_move1', 'moved1',
*module_launcher.get_services('rep1', 'rep2'))
def test_move_from_rep2(module_launcher):
module_launcher.create_file('default', 'to_move2')
module_launcher.move_file('default', 'to_move2', 'moved2',
*module_launcher.get_services('rep2', 'rep1'))
def test_move_in_subdirs(module_launcher):
src, dest = module_launcher.get_services('rep1', 'rep2')
module_launcher.create_file('default', 'test/with/subdirs/foo')
loop = CounterLoop(2)
module_launcher.on_file_moved(
loop.check, driver=src,
src='test/with/subdirs/foo', dest='test/to/other/dir/bar',
folder='default'
)
module_launcher.on_move_completed(
loop.check, driver=dest,
src='test/with/subdirs/foo', dest='test/to/other/dir/bar'
)
src.rename(
src.path('default', 'test/with/subdirs/foo'),
src.path('default', 'test/to/other/dir/bar')
)
loop.run(timeout=5)
assert not dest.exists(dest.path('default', 'test/with/subdirs/foo'))
assert dest.exists(dest.path('default', 'test/to/other/dir/bar'))
def test_move_dir_from_rep1(module_launcher):
src, dest = module_launcher.get_services('rep1', 'rep2')
module_launcher.create_file('default', 'dir/foo')
module_launcher.create_file('default', 'dir/bar')
loop = CounterLoop(4)
module_launcher.on_file_moved(
loop.check, driver=src, src='dir/foo', dest='other/foo',
folder='default'
)
module_launcher.on_file_moved(
loop.check, driver=src, src='dir/bar', dest='other/bar',
folder='default'
)
module_launcher.on_move_completed(
loop.check, driver=dest, src='dir/foo', dest='other/foo'
)
module_launcher.on_move_completed(
loop.check, driver=dest, src='dir/bar', dest='other/bar'
)
src.rename(src.path('default', 'dir'), src.path('default', 'other'))
loop.run(timeout=5)
assert not dest.exists(dest.path('default', 'dir/foo'))
assert not dest.exists(dest.path('default', 'dir/bar'))
assert dest.exists(dest.path('default', 'other/foo'))
assert dest.exists(dest.path('default', 'other/bar'))
| onitu/onitu | tests/functional/core/test_move.py | Python | mit | 2,480 |
import unittest
from tests import mock
import pyglet.clock
class ClockTestCase(unittest.TestCase):
"""Test clock using dummy time keeper
not tested:
positional and named arguments
"""
def setUp(self):
self.interval = .001
self.time = 0
self.callback_a = mock.Mock()
self.callback_b = mock.Mock()
self.callback_c = mock.Mock()
self.callback_d = mock.Mock()
self.clock = pyglet.clock.Clock(time_function=lambda: self.time)
def advance_clock(self, dt=1):
"""simulate the passage of time like a real clock would"""
frames = 0
end = self.time + dt
while self.time < end:
frames += 1
self.time += self.interval
self.clock.tick()
self.time = round(self.time, 0)
return frames
def test_schedule(self):
self.clock.schedule(self.callback_a)
frames = self.advance_clock()
self.assertEqual(self.callback_a.call_count, frames)
def test_schedule_once(self):
self.clock.schedule_once(self.callback_a, 1)
self.advance_clock(2)
self.assertEqual(self.callback_a.call_count, 1)
def test_schedule_once_multiple(self):
self.clock.schedule_once(self.callback_a, 1)
self.clock.schedule_once(self.callback_b, 2)
self.advance_clock(2)
self.assertEqual(self.callback_a.call_count, 1)
self.assertEqual(self.callback_b.call_count, 1)
def test_schedule_interval(self):
self.clock.schedule_interval(self.callback_a, 1)
self.advance_clock(2)
self.assertEqual(self.callback_a.call_count, 2)
def test_schedule_interval_multiple(self):
self.clock.schedule_interval(self.callback_a, 1)
self.clock.schedule_interval(self.callback_b, 1)
self.advance_clock(2)
self.assertEqual(self.callback_a.call_count, 2)
self.assertEqual(self.callback_b.call_count, 2)
def test_schedule_interval_soft(self):
self.clock.schedule_interval_soft(self.callback_a, 1)
self.advance_clock(2)
self.assertEqual(self.callback_a.call_count, 2)
@unittest.skip('Requires changes to the clock')
def test_schedule_interval_soft_multiple(self):
self.clock.schedule_interval(self.callback_a, 1)
self.clock.schedule_interval_soft(self.callback_b, 1)
self.clock.schedule_interval_soft(self.callback_b, 1)
next_ts = set(i.next_ts for i in self.clock._scheduled_items)
self.assertEqual(len(next_ts), 3)
self.advance_clock()
self.assertEqual(self.callback_a.call_count, 1)
self.assertEqual(self.callback_b.call_count, 2)
def test_schedule_unschedule(self):
self.clock.schedule(self.callback_a)
self.clock.unschedule(self.callback_a)
self.advance_clock()
self.assertEqual(self.callback_a.call_count, 0)
def test_schedule_once_unschedule(self):
self.clock.schedule_once(self.callback_a, 1)
self.clock.unschedule(self.callback_a)
self.advance_clock()
self.assertEqual(self.callback_a.call_count, 0)
def test_schedule_interval_unschedule(self):
self.clock.schedule_interval(self.callback_a, 1)
self.clock.unschedule(self.callback_a)
self.advance_clock()
self.assertEqual(self.callback_a.call_count, 0)
def test_schedule_interval_soft_unschedule(self):
self.clock.schedule_interval_soft(self.callback_a, 1)
self.clock.unschedule(self.callback_a)
self.advance_clock()
self.assertEqual(self.callback_a.call_count, 0)
def test_unschedule_removes_all(self):
self.clock.schedule(self.callback_a)
self.clock.schedule_once(self.callback_a, 1)
self.clock.schedule_interval(self.callback_a, 1)
self.clock.schedule_interval_soft(self.callback_a, 1)
self.clock.schedule(self.callback_a)
self.clock.schedule(self.callback_b)
self.clock.unschedule(self.callback_a)
frames = self.advance_clock(10)
self.assertEqual(self.callback_a.call_count, 0)
# callback_b is used to verify that the entire event queue was not cleared
self.assertEqual(self.callback_b.call_count, frames)
def test_schedule_will_not_call_function(self):
self.clock.schedule(self.callback_a)
self.assertEqual(self.callback_a.call_count, 0)
self.clock.schedule_once(self.callback_a, 0)
self.assertEqual(self.callback_a.call_count, 0)
self.clock.schedule_interval(self.callback_a, 1)
self.assertEqual(self.callback_a.call_count, 0)
self.clock.schedule_interval_soft(self.callback_a, 1)
self.assertEqual(self.callback_a.call_count, 0)
def test_unschedule_will_not_call_function(self):
self.clock.schedule(self.callback_a)
self.clock.unschedule(self.callback_a)
self.assertEqual(self.callback_a.call_count, 0)
self.clock.schedule_once(self.callback_a, 0)
self.clock.unschedule(self.callback_a)
self.assertEqual(self.callback_a.call_count, 0)
self.clock.schedule_interval(self.callback_a, 1)
self.clock.unschedule(self.callback_a)
self.assertEqual(self.callback_a.call_count, 0)
self.clock.schedule_interval_soft(self.callback_a, 1)
self.clock.unschedule(self.callback_a)
self.assertEqual(self.callback_a.call_count, 0)
def test_unschedule_will_not_fail_if_already_unscheduled(self):
self.clock.schedule(self.callback_a)
self.clock.unschedule(self.callback_a)
self.clock.unschedule(self.callback_a)
self.clock.schedule_once(self.callback_a, 0)
self.clock.unschedule(self.callback_a)
self.clock.unschedule(self.callback_a)
self.clock.schedule_interval(self.callback_a, 1)
self.clock.unschedule(self.callback_a)
self.clock.unschedule(self.callback_a)
self.clock.schedule_interval_soft(self.callback_a, 1)
self.clock.unschedule(self.callback_a)
self.clock.unschedule(self.callback_a)
def test_call_sched_return_True_if_called_functions(self):
self.clock.schedule(self.callback_a)
self.assertTrue(self.clock.call_scheduled_functions(0))
@unittest.skip('Requires changes to the clock')
def test_call_sched_return_True_if_called_functions_interval(self):
self.clock.schedule_once(self.callback_a, 1)
self.assertFalse(self.clock.call_scheduled_functions(0))
self.clock.set_time(1)
self.assertTrue(self.clock.call_scheduled_functions(0))
def test_call_sched_return_False_if_no_called_functions(self):
self.assertFalse(self.clock.call_scheduled_functions(0))
def test_tick_return_last_delta(self):
self.assertEqual(self.clock.tick(), 0)
self.time = 1
self.assertEqual(self.clock.tick(), 1)
self.time = 3
self.assertEqual(self.clock.tick(), 2)
@unittest.skip('Requires changes to the clock')
def test_get_sleep_time_None_if_no_items(self):
self.assertIsNone(self.clock.get_sleep_time())
@unittest.skip('Requires changes to the clock')
def test_get_sleep_time_can_sleep(self):
self.clock.schedule_once(self.callback_a, 3)
self.clock.schedule_once(self.callback_b, 1)
self.clock.schedule_once(self.callback_c, 6)
self.clock.schedule_once(self.callback_d, 7)
self.assertEqual(self.clock.get_sleep_time(), 1)
self.advance_clock()
self.assertEqual(self.clock.get_sleep_time(), 2)
self.advance_clock(2)
self.assertEqual(self.clock.get_sleep_time(), 3)
self.advance_clock(3)
self.assertEqual(self.clock.get_sleep_time(), 1)
@unittest.skip('Requires changes to the clock')
def test_get_sleep_time_cannot_sleep(self):
self.clock.schedule(self.callback_a)
self.clock.schedule_once(self.callback_b, 1)
self.assertEqual(self.clock.get_sleep_time(), 0)
@unittest.skip
def test_schedule_item_during_tick(self):
def replicating_event(dt):
self.clock.schedule(replicating_event)
counter()
counter = mock.Mock()
self.clock.schedule(replicating_event)
# one tick for the original event
self.clock.tick()
self.assertEqual(counter.call_count, 1)
# requires access to private member
self.assertEqual(len(self.clock._schedule_items), 2)
# one tick from original, then two for new
# now event queue should have two items as well
self.clock.tick()
self.assertEqual(counter.call_count, 3)
# requires access to private member
self.assertEqual(len(self.clock._schedule_items), 4)
def test_unschedule_interval_item_during_tick(self):
def suicidal_event(dt):
counter()
self.clock.unschedule(suicidal_event)
counter = mock.Mock()
self.clock.schedule_interval(suicidal_event, 1)
self.advance_clock()
self.assertEqual(counter.call_count, 1)
@unittest.skip
def test_schedule_interval_item_during_tick(self):
def replicating_event(dt):
self.clock.schedule_interval(replicating_event, 1)
counter()
counter = mock.Mock()
self.clock.schedule_interval(replicating_event, 1)
# advance time for the original event
self.advance_clock()
self.assertEqual(counter.call_count, 1)
# requires access to private member
self.assertEqual(len(self.clock._schedule_interval_items), 2)
# one tick from original, then two for new
# now event queue should have two items as well
self.advance_clock()
self.assertEqual(counter.call_count, 3)
# requires access to private member
self.assertEqual(len(self.clock._schedule_interval_items), 4)
def test_scheduler_integrity(self):
"""most tests in this suite do not care about which order
scheduled items are executed. this test will verify that
the order things are executed is correct.
"""
expected_order = [self.callback_a, self.callback_b,
self.callback_c, self.callback_d]
# schedule backwards to verify that they are scheduled correctly,
# even if scheduled out-of-order.
for delay, func in reversed(list(enumerate(expected_order, start=1))):
self.clock.schedule_once(func, delay)
for index, func in enumerate(expected_order, start=1):
self.advance_clock()
self.assertTrue(func.called)
self.assertFalse(any(i.called for i in expected_order[index:]))
def test_slow_clock(self):
"""pyglet's clock will not make up for lost time. in this case, the
interval scheduled for callback_[bcd] is 1, and 2 seconds have passed.
since pyglet won't make up for lost time, they are only called once.
"""
self.clock.schedule(self.callback_a)
self.clock.schedule_once(self.callback_b, 1)
self.clock.schedule_interval(self.callback_c, 1)
self.clock.schedule_interval_soft(self.callback_d, 1)
# simulate a slow clock
self.time = 2
self.clock.tick()
self.assertEqual(self.callback_a.call_count, 1)
self.assertEqual(self.callback_b.call_count, 1)
self.assertEqual(self.callback_c.call_count, 1)
self.assertEqual(self.callback_d.call_count, 1)
def test_slow_clock_reschedules(self):
"""pyglet's clock will not make up for lost time. in this case, the
interval scheduled for callback_[bcd] is 1, and 2 seconds have passed.
since pyglet won't make up for lost time (call events that missed their
execution time), they are only called once. this test verifies that
missed events are rescheduled and executed later
"""
self.clock.schedule(self.callback_a)
self.clock.schedule_once(self.callback_b, 1)
self.clock.schedule_interval(self.callback_c, 1)
self.clock.schedule_interval_soft(self.callback_d, 1)
# simulate slow clock
self.time = 2
self.clock.tick()
# simulate a proper clock (advance clock time by one)
frames = self.advance_clock()
# make sure our clock is at 3 seconds
self.assertEqual(self.time, 3)
# the +1 is the call during the slow clock period
self.assertEqual(self.callback_a.call_count, frames + 1)
# only scheduled to happen once
self.assertEqual(self.callback_b.call_count, 1)
# 2 because they 'missed' a call when the clock lagged
# with a good clock, this would be 3
self.assertEqual(self.callback_c.call_count, 2)
self.assertEqual(self.callback_d.call_count, 2)
@unittest.skip('Requires changes to the clock')
def test_get_interval(self):
self.assertEqual(self.clock.get_interval(), 0)
self.advance_clock(100)
self.assertEqual(round(self.clock.get_interval(), 10), self.interval)
def test_soft_scheduling_stress_test(self):
"""test that the soft scheduler is able to correctly soft-schedule
several overlapping events.
this test delves into implementation of the clock, and may break
"""
# this value represents evenly scheduled items between 0 & 1
# and what is produced by the correct soft-scheduler
expected = [0.0625, 0.125, 0.1875, 0.25, 0.3125, 0.375, 0.4375, 0.5,
0.5625, 0.625, 0.6875, 0.75, 0.8125, 0.875, 0.9375, 1]
for i in range(16):
self.clock.schedule_interval_soft(None, 1)
# sort the clock items
items = sorted(i.next_ts for i in self.clock._schedule_interval_items)
self.assertEqual(items, expected)
| nicememory/pie | pyglet/tests/unit/test_clock.py | Python | apache-2.0 | 13,923 |
import os, sys; sys.path.append(os.path.join("..", "..", ".."))
import re
from pattern.web import Google, URL
from pattern.web import Document, plaintext
# An interesting experiment on how to use the Google API
# and http://amplicate.com for opinion mining.
# (let's hope we get a real Amplicate API soon!)
query = "smurf"
# An example result, containing all the information we need:
# URL: http://amplicate.com/love/george-w-bush
# Title: <b>George</b> W <b>Bush</b> Hate - 64% People Agree (803 opinions)
for r in Google().search(query+" site:amplicate.com"):
u = URL(r.url)
if "love" in u.path \
or "hate" in u.path:
b = True
p = u.page.lower().replace("-", "")
for i, w in enumerate(query.lower().replace("-", " ").split()):
if i == 0 and not p.startswith(w):
b=False; break
if w not in p:
b=False; break
if b:
love = "love" in u.path
f = int(re.search("- ([0-9]{1,3})%", r.title).group(1)) * 0.01
n = int(re.search("\(([0-9]+) opinions", r.title).group(1))
print r.title
print r.url
print "love:", love and f or (1-f)
print "hate:", love and (1-f) or f
print "opinions:", int(round(n / f))
print
# Of course we can dig in deeper by following the link to r.url,
# but that would classify as screen-scraping.
#dom = Document(u.download())
#for p in dom.by_tag("p.comment-body"):
# print plaintext(p.content)
# print
#break
| agermanidis/Pattern | examples/01-web/10-amplicate.py | Python | bsd-3-clause | 1,645 |
import os
import re
import shutil
from datetime import datetime
import config
import database
import thetvdb
import utility
from file_matcher import SeriesMatcher
from file_matcher import MovieMatcher
RECENT_ADDITIONS = "_Recent Additions"
PY_TIVO_METADATA_EXT = ".txt"
SERIES_FILE_NAME='%s-s%02i_e%03i.%s'
MOVIE_FILE_NAME='%s (%i) [%i].%s'
MOVIE_WITH_DISC_FILE_NAME='%s (%i) Disc%s [%i].%s'
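# Illustrative expansions of the name templates above (example values only):
#   SERIES_FILE_NAME -> 'some_show-s02_e013.mkv'
#   MOVIE_FILE_NAME -> 'Some Movie (2009) [42].mkv'
#   MOVIE_WITH_DISC_FILE_NAME -> 'Some Movie (2009) Disc1 [42].mkv'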
class FileManager():
def __init__(self, config, database, thetvdb, moviedb, debug):
self.database = database
self.thetvdb = thetvdb
self.moviedb = moviedb
self.debug = debug
self.lock_file_path = config.getLockFile()
self.recent_duration_in_minutes = config.getLibraryRecentDurationInMinutes()
self.recent_path = config.getLibraryRecentPath()
self.tv_genre_path = config.getLibraryTvGenrePath()
self.movie_genre_path = config.getLibraryMovieGenrePath()
self.media_file_extension_str = config.getMediaFileExtensions()
self.file_extensions = self.media_file_extension_str.split(',')
self.format = config.getLibraryFormat()
self.wait_from_file_creation_minutes = int(config.getWaitFromFileCreationInMinutes())
# create SeriesMatcher objects for each watched series
self.file_matchers = []
watched_series = self.database.get_watched_series()
for series in watched_series:
self.file_matchers.append(SeriesMatcher(config, series, debug))
#create the pattern for movie files
self.movie_matcher = MovieMatcher(moviedb, database, config, debug)
# returns a tuple of the form: (file_name, series, episode)
def match_file(self, file_path, skip_timecheck):
if skip_timecheck or self.is_time_to_process_file(file_path):
for file_matcher in self.file_matchers:
if file_matcher.matches_series_title(file_path):
if self.debug:
print "File '%s' matches series '%s'." % (file_path, file_matcher.series.title)
matches = file_matcher.match_episode(file_path)
for match in matches:
episode = match.get_episode_metadata(self.database, self.thetvdb)
if episode:
return (match.file_name, match.series, episode)
# if no matcher matches the file (or if matched episode doesn't exist), return None
return None
def match_movie_file(self, file_path, skip_timecheck):
if skip_timecheck or self.is_time_to_process_file(file_path):
return self.movie_matcher.match(file_path)
def is_time_to_process_file(self, file_path):
now = datetime.now()
create_time = datetime.fromtimestamp(os.path.getmtime(file_path))
minutes_from_creation = int((now - create_time).seconds / 60.0)
if minutes_from_creation > self.wait_from_file_creation_minutes:
return True
else:
print "Will not process file '%s' because it is too soon after file creation time. Minutes since creation: %s Minutes before processing: %s" % (file_path, minutes_from_creation, self.wait_from_file_creation_minutes)
return False
def generate_episode_metadata(self, episode):
if self.format == 'pyTivo':
if self.debug:
print 'Generating metadata for \'%s\' season %i episode %i in pyTivo format' % (episode.series.title, episode.season_number, episode.episode_number)
unformatted_metadata = episode.format_for_pyTivo(datetime.now())
to_return = []
for l in unformatted_metadata:
to_append = utility.unicode_to_ascii(l)
to_append = to_append + os.linesep
to_return.append(to_append)
return to_return
else:
print "Format '%s' is not a valid format.\n" % self.format
return None
def generate_movie_metadata(self, movie):
if self.format == 'pyTivo':
if self.debug:
print 'Generating metadata for movie \'%s\' in pyTivo format' % (movie.title, )
unformatted_metadata = movie.format_for_pyTivo()
to_return = []
for l in unformatted_metadata:
to_append = utility.unicode_to_ascii(l)
to_append = to_append + os.linesep
to_return.append(to_append)
return to_return
else:
print "Format '%s' is not a valid format.\n" % self.format
return None
def copy_media_to_library(self, input_file_path, library_path, library_file_name, move):
try:
full_output_path = os.path.join(library_path, library_file_name)
print "Adding file '%s' to the library.\n" % full_output_path,
if not os.path.exists(library_path):
os.makedirs(library_path)
if move:
shutil.move(input_file_path, full_output_path)
else:
shutil.copy(input_file_path, full_output_path)
return True
except:
return False
def clear_existing_metadata(self, library_path, library_file_name):
media_file_path = os.path.join(library_path, library_file_name)
if self.format == 'pyTivo':
meta_file_path = media_file_path + PY_TIVO_METADATA_EXT
else:
return
if os.path.exists(meta_file_path):
os.remove(meta_file_path)
def write_metadata(self, library_path, library_file_name, metadata):
media_file_path = os.path.join(library_path, library_file_name)
if self.format == 'pyTivo':
meta_file_path = media_file_path + PY_TIVO_METADATA_EXT
else:
return False
try:
meta_file = open(meta_file_path, "w")
try:
meta_file.writelines(metadata)
meta_file.flush()
finally:
meta_file.close()
except:
return False
return True
def add_to_recent(self, library_path, library_file_name):
recent_file_name = datetime.now().strftime('%Y-%m-%d_%H-%M-%S_') + library_file_name
media_file_path = os.path.join(library_path, library_file_name)
recent_file_path = os.path.join(self.recent_path, recent_file_name)
meta_file_path = None
if self.format == 'pyTivo':
meta_file_path = media_file_path + PY_TIVO_METADATA_EXT
recent_meta_file_path = recent_file_path + PY_TIVO_METADATA_EXT
try:
if not os.path.exists(self.recent_path):
os.makedirs(self.recent_path)
os.symlink(media_file_path, recent_file_path)
if meta_file_path is not None:
os.symlink(meta_file_path, recent_meta_file_path)
return True
except:
return False
def add_to_genres(self, genre_provider, genre_root_path, library_path, to_add_name, to_add_is_dir):
if genre_root_path is None:
return False
if genre_provider.genres is None or len(genre_provider.genres) == 0:
return False
if not os.path.exists(genre_root_path):
os.makedirs(genre_root_path)
media_path = os.path.join(library_path, to_add_name)
meta_path = None
if not to_add_is_dir and self.format == 'pyTivo':
meta_path = media_path + PY_TIVO_METADATA_EXT
for genre in genre_provider.genres:
if genre is not None and genre.strip() != '':
genre_path = os.path.join(genre_root_path, genre)
media_genre_path = os.path.join(genre_path, to_add_name)
if not os.path.exists(genre_path):
os.makedirs(genre_path)
if not os.path.exists(media_genre_path):
os.symlink(media_path, media_genre_path)
if not to_add_is_dir and self.format == 'pyTivo':
meta_genre_path = media_genre_path + PY_TIVO_METADATA_EXT
if not os.path.exists(meta_genre_path):
os.symlink(meta_path, meta_genre_path)
return True
def cleanup_recent_folder(self):
files = os.listdir(self.recent_path)
for f in files:
if self.is_media_file(f):
full_path = os.path.join(self.recent_path, f)
file_timestamp = os.path.getctime(full_path)
file_time = datetime.fromtimestamp(file_timestamp)
file_duration = datetime.now() - file_time
duration_in_minutes = file_duration.seconds/60 + file_duration.days*24*60
if int(duration_in_minutes) >= int(self.recent_duration_in_minutes):
print "Removing file '%s' from recent additions folder.\n" % full_path,
os.remove(full_path)
if self.format == 'pyTivo':
os.remove(full_path + PY_TIVO_METADATA_EXT)
def get_process_lock(self):
if os.path.exists(self.lock_file_path):
return None
else:
lock_file = open(self.lock_file_path, "w")
lock_file.write("locked")
lock_file.close()
return self.lock_file_path
def relinquish_process_lock(self):
if os.path.exists(self.lock_file_path):
os.remove(self.lock_file_path)
def is_media_file(self, file_name):
for e in self.file_extensions:
pattern = re.compile('^.*\.' + e + '$', re.I)
if not pattern.match(file_name) is None:
return True
return False
def get_series_library_file_name(self, file_name, episode):
(file, extension) = utility.split_file_name(file_name)
split_title = [w.strip().lower() for w in episode.series.title.split(' ')]
return SERIES_FILE_NAME % ('_'.join(split_title), episode.season_number, episode.episode_number, extension)
def get_movie_library_file_name(self, file_name, movie, disc):
(file, extension) = utility.split_file_name(file_name)
if disc is None:
return MOVIE_FILE_NAME % (movie.title, movie.movie_year, movie.id, extension)
else:
return MOVIE_WITH_DISC_FILE_NAME % (movie.title, movie.movie_year, disc, movie.id, extension)
def get_library_path(self, library_base_path, episode):
title = episode.series.title
season = episode.season_number
return os.path.join(library_base_path, title, "Season %02i" % (season,) )
| mrtrumbe/meliman | file_manager.py | Python | bsd-3-clause | 10,724 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including an optimistic statement by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
# a couple of internal constants
LEN_NAME=50
LEN_DOC=1000
LEN_PATH=4096
LEN_CONTENT=100000
LEN_JSON=100000
LEN_USERNAME=30
LEN_PERSONNAME=40
LEN_DOMAIN=150
LEN_EMAIL=200
LEN_TYPE=20
LEN_EXT=8
LEN_CRYPTPW=100
| smurfix/pybble | pybble/core/models/__init__.py | Python | gpl-3.0 | 875 |
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
This is a script that gathers tags from instances and reports the status of the tags to zabbix
Usage:
ops-ec2-check-tags.py --aws-creds-profile profile1 --clusterid=testcluster --region=us-east-1
"""
# Ignoring module name
# pylint: disable=invalid-name
import os
import argparse
import requests
# Reason: disable pylint import-error because our libs aren't loaded on jenkins.
# Status: temporary until we start testing in a container where our stuff is installed.
# pylint: disable=import-error
from openshift_tools.monitoring.metric_sender import MetricSender
# uncomment this for realsie
from openshift_tools.cloud.aws.instance_util import InstanceUtil
CONFIG_LOOP_TAG_KEY = 'config_loop.enabled'
class AWSTagsMonitorCLI(object):
""" Responsible for parsing cli args and running the snapshotter. """
def __init__(self):
""" initialize the class """
self.args = None
self.parse_args()
@staticmethod
def get_current_az():
""" Returns the Availability Zone that the instance is in. """
availability_zone = requests.get('http://169.254.169.254/latest/meta-data/placement/availability-zone').text
return availability_zone
@staticmethod
def get_current_region():
""" Returns the region that the instance is in. """
availability_zone = AWSTagsMonitorCLI.get_current_az()
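        # An availability zone is the region name plus a trailing letter,
        # e.g. 'us-east-1a'; dropping the last character yields 'us-east-1'.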
region = availability_zone[0:-1]
return region
def parse_args(self):
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='AWS Tag Checker')
parser.add_argument('--aws-creds-profile', required=False,
help='The AWS credentials profile to use.')
parser.add_argument('--clusterid', required=False,
help='The clusterid of items to check')
parser.add_argument('--dry-run', action='store_true', default=False,
help='Say what would have been done, but don\'t actually do it.')
parser.add_argument('--region', required=False,
                            help='The AWS region of items to check')
self.args = parser.parse_args()
def main(self):
""" main function """
if not self.args.region:
self.args.region = AWSTagsMonitorCLI.get_current_region()
if self.args.aws_creds_profile:
os.environ['AWS_PROFILE'] = self.args.aws_creds_profile
instance_util = InstanceUtil(self.args.region, True)
if self.args.clusterid:
instances = instance_util.get_all_instances_as_dict(filters={"tag:clusterid" : self.args.clusterid})
else:
instances = instance_util.get_all_instances_as_dict()
tags = []
# This will print out a list of instances
# and the tags associated with them
for v in instances.itervalues():
# Skip scale group nodes because they don't have names,
# and their tags are managed by the scale group
if v.tags.get('scalegroup') == 'True':
continue
# Pre-3.11 masters will have 'Name' tags, but newer ones won't.
if 'Name' not in v.tags:
# If the Name tag is missing, add one.
v.tags['Name'] = "{}-{}-{}".format(v.tags['clusterid'], v.tags['host-type'], v.private_dns_name)
print v.id + ":"
for name, value in v.tags.iteritems():
print " %s: %s" %(name, value)
print
tags.append(v.tags)
print "Sending results to Zabbix:"
if self.args.dry_run:
print " *** DRY RUN, NO ACTION TAKEN ***"
else:
AWSTagsMonitorCLI.report_tags_to_zabbix(tags)
@staticmethod
def report_tags_to_zabbix(tags):
""" Sends the commands exit code to zabbix. """
mts = MetricSender(verbose=True)
#######################################################
# This reports the "config" tag from each instance
# If config ~= "true", report 0
# If config ~= "false", report 1
# If config not found, report 2
#######################################################
for tag in tags:
if 'config' in tag.keys():
if tag['config'].lower() == "true":
config_value = 0
else:
config_value = 1
else:
config_value = 2
mts.add_metric({CONFIG_LOOP_TAG_KEY : config_value}, host=tag['Name'])
####################################
# End of config tag checking
####################################
# Actually send them
mts.send_metrics()
if __name__ == "__main__":
AWSTagsMonitorCLI().main()
| blrm/openshift-tools | scripts/monitoring/ops-ec2-check-tags.py | Python | apache-2.0 | 4,882 |
"""
Andrew Hill
MacArthur Lab - 2014
Functions for handling remapping between HGVS and genomic coordinates.
"""
import hgvs
import hgvs.utils
from pygr.seqdb import SequenceFileDB
import os
import sys
import re
from macarthur_core.io import file_io
from macarthur_core.lovd import utilities
class VariantRemapper:
"""
Class containing functions for remapping of variants from HGVS to genomic coordinate notation.
"""
def __init__(self):
"""
Initializes hg19 reference and reference transcripts
"""
genome_path = os.path.join(os.path.dirname(__file__), 'resources', 'hg19.fa')
refseq_path = os.path.join(os.path.dirname(__file__), 'resources', 'genes.refGene')
# Read genome sequence using pygr.
self.genome = SequenceFileDB(genome_path)
# Read RefSeq transcripts into a python dict.
with open(refseq_path) as infile:
self.transcripts = hgvs.utils.read_transcripts(infile)
def hgvs_to_vcf(self, hgvs_variant):
"""
Converts a single variant provided in HGVS notation to genomic coordinate notation.
See U(https://humgenprojects.lumc.nl/trac/mutalyzer/wiki/PositionConverter) for more information on acceptable
inputs and outputs, and remapping functionality.
@param hgvs_variant: HGVS description of variant, such as NM_001100.3:c.137T>C. The portion prior to the colon is
            the refseqID used as the reference for the variant. The portion after the colon is an HGVS-style description
            of the mutation (a SNP from T to C at location 137 in the example above).
        @type hgvs_variant: string
@return: A tuple (chromosome_number, coordinate, ref, alt) in that order denoting the VCF notation of the variant
@rtype: tuple of strings
"""
# Library requires string not unicode, ensure format is correct
hgvs_variant = str(hgvs_variant)
chromosome_number, coordinate, ref, alt = hgvs.parse_hgvs_name(hgvs_variant, self.genome, get_transcript=self._get_transcript)
chromosome_number = re.match('chr(.+)', chromosome_number).group(1)
coordinate = str(coordinate)
return chromosome_number, coordinate, ref, alt
def vcf_to_hgvs(self, reference_transcript, vcf_notation):
"""
Converts a single VCF notation variant to HGVS notation relative to a given transcript.
@param reference_transcript: the refseq id of the reference transcript to use for HGVS notation
@type reference_transcript: string
@param vcf_notation: a tuple containing elements chromosome_number, coordinate, ref, and alt in that order
@type vcf_notation: tuple of strings
        @return: hgvs notation of variant in format reference_transcript:hgvs_description
@rtype: string
"""
chromosome_number, coordinate, ref, alt = vcf_notation
coordinate = int(coordinate)
transcript = self._get_transcript(reference_transcript)
return hgvs.format_hgvs_name(chromosome_number, coordinate, ref, alt, self.genome, transcript)
def _get_transcript(self, name):
"""
Callback to provide reference transcript by its name
@param name: name of reference transcript
@type name: string
@return: line of information on transcript from resource file
"""
return self.transcripts.get(name)
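# A minimal usage sketch for VariantRemapper (illustrative only; actual
# coordinates depend on the bundled hg19.fa and genes.refGene resources):
#   remapper = VariantRemapper()
#   chrom, pos, ref, alt = remapper.hgvs_to_vcf('NM_001100.3:c.137T>C')
#   hgvs_name = remapper.vcf_to_hgvs('NM_001100.3', (chrom, pos, ref, alt))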
def generate_vcf_from_hgvs(input_file, output_file, hgvs_column, protein_change_column, column_delimiter='\t'):
"""
Generate VCF files from files containing variants in HGVS notation. First row must contain column labels.
@param input_file: path to input file containing HGVS variants.
@type input_file: string
@param output_file: path to output file for VCF file
@type output_file: string
@param hgvs_column: column label for HGVS notation column
@type hgvs_column: string
@param protein_change_column: column label for predicted protein change column
@type protein_change_column: string
@param column_delimiter: column delimiter to use for input file
@type column_delimiter: string
@return: list of remapping error information. Each entry is [file, hgvs_notation, error].
@rtype: 2D list
"""
remapper = VariantRemapper()
remapping_errors = []
table_data = file_io.read_table_from_file(input_file, column_delimiter=column_delimiter)
header = table_data.pop(0)
# Isolate data columns with HGVS mutations and protein change
hgvs_notation_index = utilities.find_string_index(header, hgvs_column)
hgvs_notation = []
protein_change_index = utilities.find_string_index(header, protein_change_column)
protein_change = []
# Remap Variants and build list for INFO column tags
vcf_notation_variants = []
for row in table_data:
try:
vcf_notation = remapper.hgvs_to_vcf(row[hgvs_notation_index])
vcf_notation_variants.append(vcf_notation)
hgvs_notation.append(row[hgvs_notation_index])
protein_change.append(row[protein_change_index])
except Exception as e:
            remapping_errors.append([input_file, row[hgvs_notation_index], str(e)])
info_column_tags = {'HGVS': ('string', 'LOVD HGVS notation describing DNA change', hgvs_notation),
'LAA_CHANGE': ('string', 'LOVD amino acid change', protein_change)}
# Write VCF file
    vcf_file_name = output_file
vcf_file_text = file_io.format_vcf_text(vcf_notation_variants, info_column_tags)
file_io.write_table_to_file(vcf_file_name, vcf_file_text)
    return remapping_errors
 | macarthur-lab/leiden_sc | leiden_sc/macarthur_core/remapping/remapping.py | Python | bsd-3-clause | 5,674 |
import traces
import pytest
@pytest.mark.mpl_image_compare(
savefig_kwargs={'bbox_inches': 'tight', 'dpi': 300},
remove_text=True,
style='ggplot',
tolerance=20,
)
def test_plot():
ts = traces.TimeSeries()
ts[0] = 0
ts[1] = 2
ts[3] = 1
ts[5] = 0
figure, axes = ts.plot()
return figure
def test_optional_import():
# TODO: something like this https://stackoverflow.com/a/51048604/1431778
pass
def test_invalid_call():
ts = traces.TimeSeries()
ts[0] = 0
ts[1] = 1
ts.plot(interpolate='previous')
ts.plot(interpolate='linear')
with pytest.raises(ValueError):
ts.plot(interpolate='yomama')
def test_empty():
ts = traces.TimeSeries()
ts.plot()
| datascopeanalytics/traces | tests/test_plot.py | Python | mit | 760 |
"""
This tutorial introduces denoising auto-encoders (dA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained such
that to minimize the reconstruction error (the error between x and z).
For the denoising autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd_test import load_data
from utils import tile_raster_images
try:
import PIL.Image as Image
except ImportError:
import Image
class dA(object):
"""Denoising Auto-Encoder class (dA)
    A denoising autoencoder tries to reconstruct the input from a corrupted
version of it by projecting it first in a latent space and reprojecting
it afterwards back in the input space. Please refer to Vincent et al.,2008
for more details. If x is the input then equation (1) computes a partially
destroyed version of x by means of a stochastic mapping q_D. Equation (2)
computes the projection of the input into the latent space. Equation (3)
computes the reconstruction of the input, while equation (4) computes the
reconstruction error.
.. math::
\tilde{x} ~ q_D(\tilde{x}|x) (1)
y = s(W \tilde{x} + b) (2)
        z = s(W' y + b') (3)
L(x,z) = -sum_{k=1}^d [x_k \log z_k + (1-x_k) \log( 1-z_k)] (4)
"""
def __init__(
self,
numpy_rng,
theano_rng=None,
input=None,
n_visible=128 * 128,
n_hidden=500,
W=None,
bhid=None,
bvis=None
):
"""
Initialize the dA class by specifying the number of visible units (the
dimension d of the input ), the number of hidden units ( the dimension
d' of the latent or hidden space ) and the corruption level. The
constructor also receives symbolic variables for the input, weights and
        bias. Such symbolic variables are useful when, for example, the input
is the result of some computations, or when weights are shared between
the dA and an MLP layer. When dealing with SdAs this always happens,
the dA on layer 2 gets as input the output of the dA on layer 1,
and the weights of the dA are used in the second stage of training
to construct an MLP.
:type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random generator used to generate weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type input: theano.tensor.TensorType
:param input: a symbolic description of the input or None for
standalone dA
:type n_visible: int
:param n_visible: number of visible units
:type n_hidden: int
:param n_hidden: number of hidden units
:type W: theano.tensor.TensorType
:param W: Theano variable pointing to a set of weights that should be
                  shared between the dA and another architecture; if dA should
be standalone set this to None
:type bhid: theano.tensor.TensorType
:param bhid: Theano variable pointing to a set of biases values (for
                     hidden units) that should be shared between the dA and another
architecture; if dA should be standalone set this to None
:type bvis: theano.tensor.TensorType
:param bvis: Theano variable pointing to a set of biases values (for
                     visible units) that should be shared between the dA and another
architecture; if dA should be standalone set this to None
"""
self.n_visible = n_visible
self.n_hidden = n_hidden
# create a Theano random generator that gives symbolic random values
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# note : W' was written as `W_prime` and b' as `b_prime`
if not W:
            # W is initialized with `initial_W` which is uniformly sampled
            # from -4*sqrt(6./(n_visible+n_hidden)) and
            # 4*sqrt(6./(n_hidden+n_visible)); the output of uniform is
            # converted using asarray to dtype
            # theano.config.floatX so that the code is runnable on GPU
initial_W = numpy.asarray(
numpy_rng.uniform(
low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)
),
dtype=theano.config.floatX
)
W = theano.shared(value=initial_W, name='W', borrow=True)
if not bvis:
bvis = theano.shared(
value=numpy.zeros(
n_visible,
dtype=theano.config.floatX
),
borrow=True
)
if not bhid:
bhid = theano.shared(
value=numpy.zeros(
n_hidden,
dtype=theano.config.floatX
),
name='b',
borrow=True
)
self.W = W
# b corresponds to the bias of the hidden
self.b = bhid
# b_prime corresponds to the bias of the visible
self.b_prime = bvis
# tied weights, therefore W_prime is W transpose
self.W_prime = self.W.T
self.theano_rng = theano_rng
# if no input is given, generate a variable representing the input
if input is None:
# we use a matrix because we expect a minibatch of several
# examples, each example being a row
self.x = T.dmatrix(name='input')
else:
self.x = input
self.params = [self.W, self.b, self.b_prime]
def get_corrupted_input(self, input, corruption_level):
"""This function keeps ``1-corruption_level`` entries of the inputs the
        same and zeroes out a randomly selected subset of size ``corruption_level``
Note : first argument of theano.rng.binomial is the shape(size) of
random numbers that it should produce
second argument is the number of trials
third argument is the probability of success of any trial
this will produce an array of 0s and 1s where 1 has a
probability of 1 - ``corruption_level`` and 0 with
``corruption_level``
                The binomial function returns an int64 data type by
                default. int64 multiplied by the input
                type (floatX) always returns float64. To keep all data
                in floatX when floatX is float32, we set the dtype of
                the binomial to floatX. As in our case the value of
                the binomial is always 0 or 1, this doesn't change the
                result. This is needed to allow the gpu to work
                correctly as it only supports float32 for now.
"""
return self.theano_rng.binomial(size=input.shape, n=1,
p=1 - corruption_level,
dtype=theano.config.floatX) * input
def get_hidden_values(self, input):
""" Computes the values of the hidden layer """
return T.nnet.sigmoid(T.dot(input, self.W) + self.b)
def get_reconstructed_input(self, hidden):
"""Computes the reconstructed input given the values of the
hidden layer
"""
return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
def get_cost_updates(self, corruption_level, learning_rate):
""" This function computes the cost and the updates for one trainng
step of the dA """
tilde_x = self.get_corrupted_input(self.x, corruption_level)
y = self.get_hidden_values(tilde_x)
z = self.get_reconstructed_input(y)
# note : we sum over the size of a datapoint; if we are using
# minibatches, L will be a vector, with one entry per
# example in minibatch
L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
# note : L is now a vector, where each element is the
# cross-entropy cost of the reconstruction of the
# corresponding example of the minibatch. We need to
# compute the average of all these to get the cost of
# the minibatch
cost = T.mean(L)
# compute the gradients of the cost of the `dA` with respect
# to its parameters
gparams = T.grad(cost, self.params)
# generate the list of updates
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(self.params, gparams)
]
return (cost, updates)
def test_dA(learning_rate=0.1, training_epochs=50,
dataset='mnist.pkl.gz',
batch_size=20, output_folder='dA_plots'):
"""
This demo is tested on MNIST
:type learning_rate: float
    :param learning_rate: learning rate used for training the DeNoising
AutoEncoder
:type training_epochs: int
:param training_epochs: number of epochs used for training
:type dataset: string
    :param dataset: path to the pickled dataset
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# start-snippet-2
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
# end-snippet-2
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
os.chdir(output_folder)
####################################
# BUILDING THE MODEL NO CORRUPTION #
####################################
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
da = dA(
numpy_rng=rng,
theano_rng=theano_rng,
input=x,
n_visible=128 * 128,
n_hidden=500
)
cost, updates = da.get_cost_updates(
corruption_level=0.,
learning_rate=learning_rate
)
train_da = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size]
}
)
start_time = timeit.default_timer()
############
# TRAINING #
############
# go through training epochs
for epoch in xrange(training_epochs):
        # go through training set
c = []
for batch_index in xrange(n_train_batches):
c.append(train_da(batch_index))
print 'Training epoch %d, cost ' % epoch, numpy.mean(c)
end_time = timeit.default_timer()
training_time = (end_time - start_time)
print >> sys.stderr, ('The no corruption code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((training_time) / 60.))
# image = Image.fromarray(
# tile_raster_images(X=da.W.get_value(borrow=True).T,
# img_shape=(128, 128), tile_shape=(10, 10),
# tile_spacing=(1, 1)))
# image.save('filters_corruption_0.png')
# print train_set_x.get_value(borrow=True).shape
# sample = train_set_x.get_value(borrow=True)[0]
# print sample.shape
# print da.get_hidden_values(sample)
# W = da.W.get_value(borrow=True).T
# print da.W.get_value(borrow=True).T.shape
# print da.W.get_value(borrow=True).T[0].shape
#sample = T.ivector('sample')
#sample = T.matrix('sample')
index_1 = T.lscalar() # index to a [mini]batch
index_2 = T.lscalar() # index to a [mini]batch
getHV = da.get_hidden_values(x)
getHiddenValues = theano.function(
[index_1,index_2],
getHV,
givens={
x: train_set_x[index_1:index_2]
}
)
#print getHiddenValues(0,1).shape
import cPickle
from fetex_image import FetexImage
pkl_file = open('/Applications/MAMP/htdocs/DeepLearningTutorials/data/im_index.pkl', 'rb')
im_index = cPickle.load(pkl_file)
data_path = '/Applications/MAMP/htdocs/DeepLearningTutorials/data/'
#store = pd.HDFStore('/Applications/MAMP/htdocs/DeepLearningTutorials/data/df_images.h5', 'r')
fe = FetexImage(verbose=True,support_per_class=100,data_path=data_path, dataset='categories', mode='RGB')
fe.im_index = im_index
# print im_index[0]
# print im_index[1]
X_compressed = getHiddenValues(0,100)
print X_compressed.shape
fe.similarImages(X_compressed,pca=False)
# print getHiddenValues(0,1).shape
# print sum(X_compressed[0])
# print sum(getHiddenValues(1,2)[0])
#print sum(getHiddenValues(100,101)[0])
# start-snippet-3
#####################################
# BUILDING THE MODEL CORRUPTION 30% #
#####################################
# rng = numpy.random.RandomState(123)
# theano_rng = RandomStreams(rng.randint(2 ** 30))
# da = dA(
# numpy_rng=rng,
# theano_rng=theano_rng,
# input=x,
# n_visible=128 * 128,
# n_hidden=500
# )
# cost, updates = da.get_cost_updates(
# corruption_level=0.3,
# learning_rate=learning_rate
# )
# train_da = theano.function(
# [index],
# cost,
# updates=updates,
# givens={
# x: train_set_x[index * batch_size: (index + 1) * batch_size]
# }
# )
# start_time = timeit.default_timer()
# ############
# # TRAINING #
# ############
# # go through training epochs
# for epoch in xrange(training_epochs):
# # go through trainng set
# c = []
# for batch_index in xrange(n_train_batches):
# c.append(train_da(batch_index))
# print 'Training epoch %d, cost ' % epoch, numpy.mean(c)
# end_time = timeit.default_timer()
# training_time = (end_time - start_time)
# print >> sys.stderr, ('The 30% corruption code for file ' +
# os.path.split(__file__)[1] +
# ' ran for %.2fm' % (training_time / 60.))
# # end-snippet-3
# # start-snippet-4
# image = Image.fromarray(tile_raster_images(
# X=da.W.get_value(borrow=True).T,
# img_shape=(128, 128), tile_shape=(10, 10),
# tile_spacing=(1, 1)))
# image.save('filters_corruption_30.png')
# # end-snippet-4
# print da.W.get_value(borrow=True).T
os.chdir('../')
if __name__ == '__main__':
test_dA()
| webeng/DeepLearningTutorials | code/dA_v2.py | Python | bsd-3-clause | 16,442 |
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoCProject Model."""
from google.appengine.ext import db
from google.appengine.ext import ndb
from django.utils.translation import ugettext
from soc.modules.gsoc.models import code_sample as code_sample_model
import soc.modules.gsoc.models.proposal
import soc.models.program
import soc.models.organization
# constants with possible statuses of projects
# the project has been accepted into the program
STATUS_ACCEPTED = 'accepted'
# the project has failed one of evaluations
STATUS_FAILED = 'failed'
# the project has been withdrawn
STATUS_WITHDRAWN = 'withdrawn'
# the project has been marked as invalid
STATUS_INVALID = 'invalid'
class GSoCProject(db.Model):
"""Model for a GSoC project used in the GSoC workflow.
Parent:
soc.modules.gsoc.models.profile.Profile
"""
#: Required field indicating the "title" of the project
title = db.StringProperty(required=True,
verbose_name=ugettext('Title'))
title.help_text = ugettext('Title of the project')
#: Required, text field describing the project
abstract = db.TextProperty(
required=True, verbose_name=ugettext('Project abstract'))
abstract.help_text = ugettext(
'Short abstract, summary, or snippet;'
' 500 characters or less, plain text displayed publicly')
#: Text field containing all kinds of information about this project
public_info = db.TextProperty(
required=False, default='',
verbose_name=ugettext('Additional information'))
public_info.help_text = ugettext(
'Additional information about this project to be shown publicly')
#: Optional, URL which can give more information about this project
additional_info = db.URLProperty(
required=False, verbose_name=ugettext('External resource URL'))
additional_info.help_text = ugettext(
'Link to a resource containing more information about this project.')
#: Optional field storing a feed URL; displayed publicly
feed_url = db.LinkProperty(
verbose_name=ugettext('Project Feed URL'))
feed_url.help_text = ugettext(
'The URL should be a valid ATOM or RSS feed. '
'Feed entries are shown on the public page.')
#: The project can be marked to be featured on program home page.
is_featured = db.BooleanProperty(default=False, required=True,
verbose_name=ugettext('Featured'))
is_featured.help_text = ugettext(
'Should this project be featured on the program homepage.')
#: A property containing a list of Mentors assigned for this project
mentors = db.ListProperty(item_type=db.Key, default=[], required=True)
def getMentors(self):
"""Returns a list of profile_model.GSoCProfile entities which
are mentors for this project.
Returns:
list of mentors for this project
"""
mentor_ndb_keys = map(ndb.Key.from_old_key, self.mentors)
return [mentor for mentor in ndb.get_multi(mentor_ndb_keys) if mentor]
#: The status of this project
status = db.StringProperty(required=True, default=STATUS_ACCEPTED,
choices=[STATUS_ACCEPTED, STATUS_FAILED,
STATUS_WITHDRAWN, STATUS_INVALID])
#: List of all processed GradingRecords which state a pass for this project.
#: This property can be used to determine how many evaluations someone has
#: passed. And is also used to ensure that a GradingRecord has been
#: processed.
passed_evaluations = db.ListProperty(item_type=db.Key, default=[])
#: List of all processed GradingRecords which state a fail for this project.
#: This is a ListProperty to ensure that the system keeps functioning when
#: manual changes in GradingRecords occur.
failed_evaluations = db.ListProperty(item_type=db.Key, default=[])
#: Organization which this project is in
org = db.ReferenceProperty(
reference_class=soc.models.organization.Organization,
required=True, collection_name='student_projects')
#: Program in which this project has been created
program = db.ReferenceProperty(
reference_class=soc.models.program.Program, required=True,
collection_name='projects')
#: Proposal to which this project corresponds to
proposal = db.ReferenceProperty(
reference_class=soc.modules.gsoc.models.proposal.GSoCProposal,
required=False,
collection_name='projects')
#: Whether the student has submitted their code samples or not
code_samples_submitted = db.BooleanProperty(default=False)
def codeSamples(self):
"""Returns code_sample.GSoCCodeSample entities uploaded for this project.
Returns:
code sample entities for this project
"""
query = code_sample_model.GSoCCodeSample.all()
query.ancestor(self)
return query.fetch(1000)
def countCodeSamples(self):
"""Returns number of code_sample.GSoCCodeSample entities uploaded
for this project.
Returns:
number of code samples uploaded for this project
"""
query = code_sample_model.GSoCCodeSample.all(keys_only=True)
query.ancestor(self)
return query.count()
| rhyolight/nupic.son | app/soc/modules/gsoc/models/project.py | Python | apache-2.0 | 5,610 |
from __future__ import division
fornac_template = """
<!DOCTYPE html>
<meta charset="utf-8">
This is an RNA container.
<div id='rna_ss'> </div>
This after the RNA container.
<link rel='stylesheet' type='text/css' href='https://raw.githubusercontent.com/pkerpedjiev/fornac/master/css/fornac.css' />
<script type='text/javascript' src='https://code.jquery.com/jquery-2.1.4.min.js'></script>
<script type='text/javascript' src='https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.6/d3.min.js'></script>
<script type='text/javascript' src='https://rawgit.com/pkerpedjiev/fornac/master/js/fornac.js'></script>
<script type='text/javascript'>
var container = new fornac.FornaContainer("#rna_ss",
{{'applyForce': true, 'allowPanningAndZooming': true, "initialSize": [500,800], 'cssFileLocation': "https://raw.githubusercontent.com/pkerpedjiev/fornac/master/css/fornac.css"}});
var options = {{'structure': '{}',
'sequence': '{}'}};
colorStr = "{}"
container.addRNA(options.structure, options);
cs = ColorScheme(colorStr);
container.addCustomColors(cs.colorsJson);
container.changeColorScheme('custom');
</script>
"""
def create_fornac_page_for_structure(bg, color_string):
"""
Create a fornac page dispalying this structure. The dotbracket string
and sequence will automatically be extracted from the BulgeGraph.
Colors can be specified as a dictionary containing floating
point values. These will be uniformly scaled according to the color
scale passed in.
:param color_string: The color string to be passed to fornac.
e.g. "11-12: red 14-17: rgb(14,15,120)"
:return: The html text of the resulting web page.
"""
return fornac_template.format(bg.to_dotbracket_string(),
bg.seq, color_string)
def scale_colors(colors_dict, cmap=None, reverse=False):
'''
A dictionary with values containing scalars which need to
be scaled according to some color map. The keys are irrelevant.
The color map will be normalized to the range of values within
    colors_dict.
:param colors_dict: The dictionary containing the values to be color scaled.
:param cmap: A color map to be used to scale the colors.
:param reverse: Reverse the color map
:return: Another dictionary containing rgb triples as values.
'''
if cmap is None:
import matplotlib.pyplot as plt
cmap = plt.get_cmap('Blues')
values = colors_dict.values()
min_value = min(values)
max_value = max(values)
new_dict = {}
for key in colors_dict:
if reverse:
color = cmap(
1 - ((colors_dict[key] - min_value) / (max_value - min_value)))
else:
color = cmap(
(colors_dict[key] - min_value) / (max_value - min_value))
new_dict[key] = (int(255 * color[0]),
int(255 * color[1]), int(255 * color[2]))
return new_dict
def element_to_nucleotide_colors(bg, element_colors):
'''
Convert a dictionary of per-element colors to a dictionary of per-nucleotide colors
:param element_colors: A dictionary of element colors (e.g. {'i0': (255,0,0), 'm1': {255,255,255)}
:return: A dictionary of nucleotide numbers: (e.g {1: (14,15,120), 2: (255,255,255)})
'''
new_dict = {}
for key in element_colors:
for res in bg.define_residue_num_iterator(key):
new_dict[res] = element_colors[key]
return new_dict
def nucleotide_colors_to_fornac_color_string(nucleotide_colors):
'''
Convert a dictionary of per nucleotide colors to a fornac
color string.
:param nucleotide_colors: A dictionary with nucleotide numbers as keys and colors as values.
(e.g. {1: (255,0,0), 2: (255,255,0)})
:return: A color string (e.g "1:rgb(255,0,0) 2:rgb(255,0,0)")
'''
color_string = ""
for key in nucleotide_colors:
color_string += "{}:rgb({},{},{}) ".format(key, nucleotide_colors[key][0],
nucleotide_colors[key][1],
nucleotide_colors[key][2])
return color_string
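# A sketch of how these helpers are typically chained together (hypothetical
# element values; a real call needs a forgi BulgeGraph instance `bg`):
#   element_values = {'i0': 0.2, 'm1': 0.8}
#   element_rgb = scale_colors(element_values)
#   nt_rgb = element_to_nucleotide_colors(bg, element_rgb)
#   color_string = nucleotide_colors_to_fornac_color_string(nt_rgb)
#   html = create_fornac_page_for_structure(bg, color_string)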
| ViennaRNA/forgi | forgi/visual/fornac.py | Python | gpl-3.0 | 4,307 |
# The code below is written from the following youtube tutorial
# https://www.youtube.com/playlist?list=PLRyu4ecIE9tibdzuhJr94uQeKnOFkkbq6
# References for the variables used
# shape/ layerSize is architecture of the neural network. ex: (4,3,2,1) means 4 units at input, 3 units at hidden layer 1, 2 units at hidden layer 2, 1 unit at output
# layerCount is number of layers in the network excluding input layer (as it is just the buffer to hold input)
# for shape = (4,3,2,1), layerCount is 3.
# _layerInput is the List containing preactivation of each layer. (excluding input layer and including the output layer)
# _layerInput[0] contains the preactivation of the hidden layer 1
# _layerInput[layerCount - 1] contains the preactivation of the output layer
# each _layerInput[i] is a matrix, and
# j'th column of _layerInput[i] contains preactivation of (i+1)'th hidden layer for j'th training example.
# _layerOutput is the List containing activation of each layer. (excluding input layer and including the output layer)
# _layerOutput[0] contains the activation of the hidden layer 1
# _layerOutput[layerCount - 1] contains the activation of the output layer i.e. this contains the output
# outputDelta is the difference at the output layer for all training examples. j'th column contains the coordinate-wise difference at the output for j'th training example
# outputDelta is also the gradient at output if we take the least square error function
# error sum of the squared error at the output layer for all the training examples. This is a number.
# delta is the List of gradient at preactivation starting from preactivation at output to preactivation at the 1st hidden layer
# delta[0] is the matrix of gradient at preactivation of the output layer
# delta[layerCount-1] is the matrix of gradient at preactivation of 1'st hidden layer
# delta[i] is the matrix of gradient at preactivation of (layerCount-i)'th hidden layer
# j'th column of delta[i] contains the gradient at preactivation of (layerCount-i)'th hidden layer for j'th training example.
# deltaPullback is the matrix of the gradient at the activation of a layer
# Note: for gradient at the activation of i'th hidden layer, we need Wi matrix and the gradient of preactivation at (i+1) hidden layer
# deltaPullback is the matrix where j'th column shows the gradient of the activation for j'th training example.
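# A worked example of the shapes involved (illustrative, for shape = (4,3,2,1)
# and m training examples):
#   weights[0] is 3X5, weights[1] is 2X4, weights[2] is 1X3 (the extra column is for the bias)
#   _layerInput[0] and _layerOutput[0] are 3Xm (hidden layer 1)
#   _layerInput[2] and _layerOutput[2] are 1Xm (the output layer)
#   delta[0] is 1Xm (gradient at the output preactivation), delta[2] is 3Xm (hidden layer 1)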
import numpy as np
class backPropagationNetwork:
layerCount = 0
shape = None
weights = []
# constructor function for the class
def __init__(self, layerSize):
# layerSize is the Architecture of the NN as (4,3,1)
        self.layerCount = len(layerSize)-1 # the input layer is just a buffer
self.shape = layerSize
# for the forward pass maybe.
self._layerInput = []
self._layerOutput = []
self._previousWeightDelta = []
        # for a (4,3,2) architecture the weight matrices will be of size
        # 3X5 (w1), 2X4 (w2)
        # zip just makes the list of tuples coordinate-wise
for(l1,l2) in zip(layerSize[:-1],layerSize[1:]):
self.weights.append(np.random.normal(scale=0.1,size=(l2,l1+1)))
# add for each weight matrix a matrix in _previousWeightDelta for previous values
self._previousWeightDelta.append(np.zeros(shape=(l2,l1+1)))
# for sigmoid units return the function value at x or the value of the derivative at x
# dependeing upon derivative flag
def sgm(self,x,derivative=False):
if not derivative:
return 1/(1+np.exp(-x))
else:
out = self.sgm(x)
return out*(1-out)
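    # Note: the derivative branch relies on the identity sigmoid'(x) = sigmoid(x)*(1 - sigmoid(x)),
    # which is why it returns out*(1-out) with out = sgm(x)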
# forward run/pass
def run(self,X):
# no of training examples
m = X.shape[0]
# initialize/ clear out the input and output list from previous run
self._layerInput = []
self._layerOutput = []
# Forward pass
for i in range(self.layerCount):
if i == 0:
layerInput = self.weights[0].dot(np.vstack([X.T, np.ones([1,m])])) # vstack(a,b) stacks matrix/vector b below matrix/vector a
else:
layerInput = self.weights[i].dot(np.vstack([self._layerOutput[-1], np.ones([1,m])]))
self._layerInput.append(layerInput)
self._layerOutput.append(self.sgm(layerInput))
return self._layerOutput[-1].T
    def trainEpoch(self,X,Y,trainingRate = 0.5,momentum = 0.5):
# trains the network for one epoch
delta = []
m = X.shape[0]
# forward pass before we can compute the gradient by back propagation
self.run(X)
# Computing the deltas for the preactivation at each layer including the output
        # for the square error and the sigmoid activation units
# delta(for preactivation) = delta(for activation)*(sigm(preactivation)(1-sigm(preactivation)))
        for i in reversed(range(self.layerCount)): # reversed, as backpropagation works in reverse order
if i == self.layerCount-1: # if this is for the preactivation at the output
outputDelta = self._layerOutput[i] - Y.T # this is also the gradient at output if we take the least square error function
error = np.sum(outputDelta**2) # sum of all the elements along all dimensions
delta.append(outputDelta*self.sgm(self._layerInput[i],True)) # '*' operator is for coordinate wise multiplication
else:
deltaPullback = self.weights[i+1].T.dot(delta[-1]) # this is the gradient at the activation of the hidden layer (i+1), note that i = 0
# is for hidden layer 1.
delta.append(deltaPullback[:-1,:]*self.sgm(self._layerInput[i],True)) # this is the gradient at the preactivation at hidden layer (i+1)
# deltaPullback is the matrix where j'th column shows the gradient of the activation for j'th training example.
                # The last row of the deltaPullback matrix holds the gradients of the activation of the bias unit appended to each hidden layer
                # To compute the gradient at the preactivation from the gradient at the activation, we need the preactivation first,
                # and bias units have no preactivation (i.e. there is no value for them in self._layerInput[i])
                # So we drop this last row while calculating the delta of the layer (the gradient at the preactivation)
# compute the weight delta (gradient)
        # If a weight matrix connects layer i to layer i+1, for computing the gradient w.r.t. the weights we need the activation at layer i and the preactivation at layer i+1.
# here delta will have preactivation and layerOutput will have the activation for the required layers
for i in range(self.layerCount):
deltaIndex = self.layerCount - 1 - i # delta[0] is preactivation at output and so on in backward direction
if i == 0:
                layerOutput = np.vstack([X.T,np.ones([1,m])]) # for W0 the 'layerOutput' is just the input layer (plus the bias row)
else:
layerOutput = np.vstack([self._layerOutput[i-1],np.ones([1,self._layerOutput[i-1].shape[1]])]) # _layerOutput[0] contains the activation of the hidden layer 1 and so for Wi we need _layerOutput[i-1]
weightDelta = np.sum(layerOutput[None,:,:].transpose(2,0,1)*delta[deltaIndex][None,:,:].transpose(2,1,0),axis=0)
# layerOutput[None,:,:].transpose(2,0,1) -> each column Cj in layerOutput (which is an output/activation matrix for some layer) will be transposed in a different layers where each
# layer has this column Cj as one row.
# Check (1 X m X n).transpose(2,0,1) = (n X 1 X m)
# delta[deltaIndex].transpose(2,1,0) -> each column Cj in delta[deltaIndex] (which is an input/preactivation matrix for some layer) will be transposed in a different layers where each
# layer has this column Cj as one column.
# Check (1 X m X n).transpose(2,1,0) = (n X m X 1)
# weightDelta will have the gradients for the weight matrix for the current iteration. Each layer will have the gradient weight matrix for a training example for current layer
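            # Illustrative shapes (a layer with 3 incoming units feeding 2 units, m = 5 training examples):
            # layerOutput is 4 x 5 (3 units + bias, one column per example), so layerOutput[None,:,:].transpose(2,0,1) is 5 x 1 x 4
            # delta[deltaIndex] is 2 x 5, so delta[deltaIndex][None,:,:].transpose(2,1,0) is 5 x 2 x 1
            # their broadcast product is 5 x 2 x 4, and summing over axis 0 gives the 2 x 4 gradient, matching weights[i]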
            # self.weights[i] -= trainingRate * weightDelta : this is what the update would be without momentum
weightDelta = trainingRate * weightDelta + momentum*self._previousWeightDelta[i]
self.weights[i] -= weightDelta
self._previousWeightDelta[i] = weightDelta
        return error # in case it is useful
# if this module is run as a script, create an object of the class defined above and train it
if __name__ == "__main__":
bpn = backPropagationNetwork((2,2,1)) # calling __init__ constructor for the bpn object
print(bpn.shape)
print(bpn.weights)
# X = np.array([[0,0],[1,0],[0.5,3]])
# Y = bpn.run(X)
# print(X)
# print(Y)
X = np.array([[0,0],[1,1],[0,1],[1,0]])
Y = np.array([[0.05],[0.05],[0.95],[0.95]])
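    # The training data is XOR with soft targets (0.05/0.95 rather than 0/1), presumably so the sigmoid output never has to reach exactly 0 or 1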
maxIteration = 100000
minError = 1e-5
for i in range(maxIteration+1):
err = bpn.trainEpoch(X,Y,momentum=0.7)
if i%2500 == 0:
print "iteration {0}\t error: {1:0.6f}".format(i, err)
if err <= minError:
print "Minimum error reached as iteration {0}".format(i)
break
Ycomputed = bpn.run(X)
print "Input: {0}\n Output: {1}".format(X,Ycomputed) | tejesh95/NeuralNetworks | Neural.py | Python | unlicense | 8,999 |
import os
import unittest
from math import pi
import numpy
from kiva import agg
def save_path(filename):
return filename
def draw_arcs(gc, x2, y2, radiusstep=25.0):
gc.set_stroke_color((0.2,0.2,0.2)) # lightgray
gc.move_to(0, 0)
gc.line_to(100, 0)
gc.line_to(x2, y2)
gc.stroke_path()
gc.set_stroke_color((0,0,0))
for i in range(7):
gc.move_to(0, 0);
gc.arc_to(100, 0, x2, y2, i*radiusstep+20.0)
gc.stroke_path()
class TestAffineMatrix(unittest.TestCase):
def test_arc_to(self):
gc = agg.GraphicsContextArray((640,480), "rgba32")
axes = agg.CompiledPath()
axes.move_to(0.5, 50.5)
axes.line_to(100.5, 50.5)
axes.move_to(50.5, 0.5)
axes.line_to(50.5, 100.5)
box = agg.CompiledPath()
box.move_to(0.5, 0.5)
box.line_to(100.5, 0.5)
box.line_to(100.5, 100.5)
box.line_to(0.5, 100.5)
box.close_path()
arc = agg.CompiledPath()
arc.move_to(10, 10)
arc.line_to(20, 10)
arc.arc_to(40, 10, 40, 30, 20.0)
arc.line_to(40, 40)
whole_shebang = agg.CompiledPath()
whole_shebang.save_ctm()
whole_shebang.add_path(axes)
whole_shebang.add_path(box)
whole_shebang.translate_ctm(0.0, 50.5)
whole_shebang.add_path(arc)
whole_shebang.translate_ctm(50.5, 50.5)
whole_shebang.rotate_ctm(-agg.pi/2)
whole_shebang.add_path(arc)
whole_shebang.rotate_ctm(agg.pi/2)
whole_shebang.translate_ctm(50.5, -50.5)
whole_shebang.rotate_ctm(-agg.pi)
whole_shebang.add_path(arc)
whole_shebang.rotate_ctm(agg.pi)
whole_shebang.translate_ctm(-50.5, -50.5)
whole_shebang.rotate_ctm(-3*agg.pi/2)
whole_shebang.add_path(arc)
whole_shebang.restore_ctm()
gc.set_stroke_color((1.0,0.0,0.0))
gc.set_line_width(1.0)
ctm1 = gc.get_ctm()
gc.translate_ctm(50.5, 300.5)
gc.add_path(whole_shebang)
gc.stroke_path()
gc.translate_ctm(130.5, 50.0)
ctm2 = gc.get_ctm()
gc.rotate_ctm(-agg.pi/6)
gc.add_path(whole_shebang)
gc.set_stroke_color((0.0,0.0,1.0))
gc.stroke_path()
gc.set_ctm(ctm2)
gc.translate_ctm(130.5, 0.0)
ctm2 = gc.get_ctm()
gc.rotate_ctm(-agg.pi/3)
gc.scale_ctm(1.0, 2.0)
gc.add_path(whole_shebang)
gc.stroke_path()
gc.set_ctm(ctm1)
ctm1 = gc.get_ctm()
gc.translate_ctm(150.5, 20.5)
draw_arcs(gc, 70.5, 96.5)
gc.translate_ctm(300.5, 0)
draw_arcs(gc, 160.5, 76.5, 50.0)
gc.set_ctm(ctm1)
gc.translate_ctm(120.5, 100.5)
gc.scale_ctm(-1.0, 1.0)
draw_arcs(gc, 70.5, 96.5)
gc.translate_ctm(-300.5, 100.5)
gc.scale_ctm(0.75, -1.0)
draw_arcs(gc, 160.5, 76.5, 50.0)
gc.save(save_path("arc_to.png"))
def test_arc(self):
gc = agg.GraphicsContextArray((640,648))
gc.save(save_path("arc.png"))
def test_skewing_matrix(self):
val = agg.skewing_matrix(pi/4.,pi/4.)
desired = numpy.array([ 1.0,1.0,1.0,1.0,0.0,0.0])
actual = val.asarray()
assert(numpy.allclose(desired,actual))
if __name__ == "__main__":
unittest.main()
| tommy-u/enable | integrationtests/kiva/agg/test_arc.py | Python | bsd-3-clause | 3,344 |
import arcpy, os, sys
| gcsadovy/generalPY | test3.py | Python | gpl-3.0 | 23 |
"""
sentry.testutils.cases
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = (
'TestCase', 'TransactionTestCase', 'APITestCase', 'AuthProviderTestCase',
'RuleTestCase', 'PermissionTestCase', 'PluginTestCase', 'CliTestCase',
'AcceptanceTestCase',
)
import base64
import os
import os.path
import pytest
import six
import types
from click.testing import CliRunner
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, TransactionTestCase
from django.utils.importlib import import_module
from exam import before, fixture, Exam
from pkg_resources import iter_entry_points
from rest_framework.test import APITestCase as BaseAPITestCase
from six.moves.urllib.parse import urlencode
from sentry import auth
from sentry.auth.providers.dummy import DummyProvider
from sentry.constants import MODULE_ROOT
from sentry.models import GroupMeta, ProjectOption
from sentry.plugins import plugins
from sentry.rules import EventState
from sentry.utils import json
from sentry.utils.auth import SSO_SESSION_KEY
from .fixtures import Fixtures
from .helpers import AuthProvider, Feature, get_auth_header, TaskRunner, override_options
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
class BaseTestCase(Fixtures, Exam):
urls = 'sentry.web.urls'
def assertRequiresAuthentication(self, path, method='GET'):
resp = getattr(self.client, method.lower())(path)
assert resp.status_code == 302
assert resp['Location'].startswith('http://testserver' + reverse('sentry-login'))
@before
def setup_dummy_auth_provider(self):
auth.register('dummy', DummyProvider)
self.addCleanup(auth.unregister, 'dummy', DummyProvider)
@before
def setup_session(self):
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session.save()
self.session = session
def tasks(self):
return TaskRunner()
def feature(self, name, active=True):
"""
>>> with self.feature('feature:name')
>>> # ...
"""
return Feature(name, active)
def auth_provider(self, name, cls):
"""
>>> with self.auth_provider('name', Provider)
>>> # ...
"""
return AuthProvider(name, cls)
def save_session(self):
self.session.save()
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
session_cookie = settings.SESSION_COOKIE_NAME
self.client.cookies[session_cookie] = self.session.session_key
self.client.cookies[session_cookie].update(cookie_data)
def login_as(self, user, organization_id=None):
user.backend = settings.AUTHENTICATION_BACKENDS[0]
request = HttpRequest()
request.session = self.session
login(request, user)
request.user = user
if organization_id:
request.session[SSO_SESSION_KEY] = six.text_type(organization_id)
# Save the session values.
self.save_session()
def load_fixture(self, filepath):
filepath = os.path.join(
MODULE_ROOT,
'tests',
'fixtures',
filepath,
)
with open(filepath, 'rb') as fp:
return fp.read()
def _pre_setup(self):
super(BaseTestCase, self)._pre_setup()
cache.clear()
ProjectOption.objects.clear_local_cache()
GroupMeta.objects.clear_local_cache()
def _post_teardown(self):
super(BaseTestCase, self)._post_teardown()
def _makeMessage(self, data):
return json.dumps(data).encode('utf-8')
def _makePostMessage(self, data):
return base64.b64encode(self._makeMessage(data))
def _postWithHeader(self, data, key=None, secret=None, protocol=None):
if key is None:
key = self.projectkey.public_key
secret = self.projectkey.secret_key
message = self._makePostMessage(data)
with self.tasks():
resp = self.client.post(
reverse('sentry-api-store'), message,
content_type='application/octet-stream',
HTTP_X_SENTRY_AUTH=get_auth_header(
'_postWithHeader/0.0.0',
key,
secret,
protocol,
),
)
return resp
def _postCspWithHeader(self, data, key=None, **extra):
if isinstance(data, dict):
body = json.dumps({'csp-report': data})
elif isinstance(data, six.string_types):
body = data
path = reverse('sentry-api-csp-report', kwargs={'project_id': self.project.id})
path += '?sentry_key=%s' % self.projectkey.public_key
with self.tasks():
return self.client.post(
path, data=body,
content_type='application/csp-report',
HTTP_USER_AGENT=DEFAULT_USER_AGENT,
**extra
)
def _getWithReferer(self, data, key=None, referer='sentry.io', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
'sentry_data': message,
}
with self.tasks():
resp = self.client.get(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urlencode(qs)),
**headers
)
return resp
def _postWithReferer(self, data, key=None, referer='sentry.io', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
}
with self.tasks():
resp = self.client.post(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urlencode(qs)),
data=message,
content_type='application/json',
**headers
)
return resp
def options(self, options):
"""
A context manager that temporarily sets a global option and reverts
back to the original value when exiting the context.
"""
return override_options(options)
@contextmanager
def dsn(self, dsn):
"""
A context manager that temporarily sets the internal client's DSN
"""
from raven.contrib.django.models import client
try:
client.set_dsn(dsn)
yield
finally:
client.set_dsn(None)
_postWithSignature = _postWithHeader
_postWithNewSignature = _postWithHeader
class TestCase(BaseTestCase, TestCase):
pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
pass
class APITestCase(BaseTestCase, BaseAPITestCase):
pass
class AuthProviderTestCase(TestCase):
provider = DummyProvider
provider_name = 'dummy'
def setUp(self):
super(AuthProviderTestCase, self).setUp()
# TestCase automatically sets up dummy provider
if self.provider_name != 'dummy' or self.provider != DummyProvider:
auth.register(self.provider_name, self.provider)
self.addCleanup(auth.unregister, self.provider_name, self.provider)
class RuleTestCase(TestCase):
rule_cls = None
def get_event(self):
return self.event
def get_rule(self, data=None):
return self.rule_cls(
project=self.project,
data=data or {},
)
def get_state(self, **kwargs):
kwargs.setdefault('is_new', True)
kwargs.setdefault('is_regression', True)
kwargs.setdefault('is_sample', True)
return EventState(**kwargs)
def assertPasses(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is True
def assertDoesNotPass(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is False
class PermissionTestCase(TestCase):
def setUp(self):
super(PermissionTestCase, self).setUp()
self.owner = self.create_user(is_superuser=False)
self.organization = self.create_organization(
owner=self.owner,
flags=0, # disable default allow_joinleave access
)
self.team = self.create_team(organization=self.organization)
def assert_can_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 200 and resp.status_code < 300
def assert_cannot_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 300
def assert_member_can_access(self, path):
return self.assert_role_can_access(path, 'member')
def assert_teamless_member_can_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='member', teams=[],
)
self.assert_can_access(user, path)
def assert_member_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'member')
def assert_manager_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'manager')
def assert_teamless_member_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='member', teams=[],
)
self.assert_cannot_access(user, path)
def assert_team_admin_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_teamless_admin_can_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='admin', teams=[],
)
self.assert_can_access(user, path)
def assert_team_admin_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'admin')
def assert_teamless_admin_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='admin', teams=[],
)
self.assert_cannot_access(user, path)
def assert_team_owner_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_owner_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_owner_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'owner')
def assert_non_member_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.assert_cannot_access(user, path)
def assert_role_can_access(self, path, role):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role=role, teams=[self.team],
)
self.assert_can_access(user, path)
def assert_role_cannot_access(self, path, role):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role=role, teams=[self.team],
)
self.assert_cannot_access(user, path)
class PluginTestCase(TestCase):
plugin = None
def setUp(self):
super(PluginTestCase, self).setUp()
# Old plugins, plugin is a class, new plugins, it's an instance
# New plugins don't need to be registered
if isinstance(self.plugin, (type, types.ClassType)):
plugins.register(self.plugin)
self.addCleanup(plugins.unregister, self.plugin)
def assertAppInstalled(self, name, path):
for ep in iter_entry_points('sentry.apps'):
if ep.name == name:
ep_path = ep.module_name
if ep_path == path:
return
self.fail('Found app in entry_points, but wrong class. Got %r, expected %r' % (ep_path, path))
self.fail('Missing app from entry_points: %r' % (name,))
def assertPluginInstalled(self, name, plugin):
path = type(plugin).__module__ + ':' + type(plugin).__name__
for ep in iter_entry_points('sentry.plugins'):
if ep.name == name:
ep_path = ep.module_name + ':' + '.'.join(ep.attrs)
if ep_path == path:
return
self.fail('Found plugin in entry_points, but wrong class. Got %r, expected %r' % (ep_path, path))
self.fail('Missing plugin from entry_points: %r' % (name,))
class CliTestCase(TestCase):
runner = fixture(CliRunner)
command = None
default_args = []
def invoke(self, *args):
args += tuple(self.default_args)
return self.runner.invoke(self.command, args, obj={})
@pytest.mark.usefixtures('browser')
class AcceptanceTestCase(TransactionTestCase):
def save_session(self):
self.session.save()
self.browser.save_cookie(
name=settings.SESSION_COOKIE_NAME,
value=self.session.session_key,
)
| zenefits/sentry | src/sentry/testutils/cases.py | Python | bsd-3-clause | 14,473 |
# Generated by Django 2.1.7 on 2019-03-09 11:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("campaign", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="campaign",
name="description",
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name="campaign",
name="start_date",
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name="campaign",
name="url",
field=models.URLField(blank=True),
),
]
| fin/froide | froide/campaign/migrations/0002_auto_20190309_1241.py | Python | mit | 687 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Example illustrating the use of Apache Beam for solving distributing
optimization tasks.
This example solves an optimization problem which consists of distributing a
number of crops to grow in several greenhouses. The decision where to grow the
crop has an impact on the production parameters associated with the greenhouse,
which affects the total cost of production at the greenhouse. Additionally,
each crop needs to be transported to a customer so the decision where to grow
the crop has an impact on the transportation costs as well.
This type of optimization problem is known as a mixed-integer program as it
consists of discrete parameters (do we produce a crop in greenhouse A, B or C?)
and continuous parameters (the greenhouse production parameters).
Running this example requires NumPy and SciPy. The input consists of a CSV file
with the following columns (Tx representing the transportation cost/unit if the
crop is produced in greenhouse x): Crop name, Quantity, Ta, Tb, Tc, ....
Example input file with 5 crops and 3 greenhouses (a transportation cost of 0
forbids production of the crop in a greenhouse):
OP01,8,12,0,12
OP02,30,14,3,12
OP03,25,7,3,14
OP04,87,7,2,2
OP05,19,1,7,10
The pipeline consists of three phases:
- Creating a grid of mappings (assignment of each crop to a greenhouse)
- For each mapping and each greenhouse, optimization of the production
    parameters for cost, addition of the transportation costs, and aggregation
of the costs for each mapping.
- Selecting the mapping with the lowest cost.
"""
from __future__ import absolute_import
from __future__ import division
import argparse
import logging
import string
import uuid
from collections import defaultdict
import numpy as np
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from scipy.optimize import minimize
class Simulator(object):
"""Greenhouse simulation for the optimization of greenhouse parameters."""
def __init__(self, quantities):
super(Simulator, self).__init__()
self.quantities = np.atleast_1d(quantities)
self.A = np.array([[3.0, 10, 30],
[0.1, 10, 35],
[3.0, 10, 30],
[0.1, 10, 35]])
self.P = 1e-4 * np.array([[3689, 1170, 2673],
[4699, 4387, 7470],
[1091, 8732, 5547],
[381, 5743, 8828]])
a0 = np.array([[1.0, 1.2, 3.0, 3.2]])
coeff = np.sum(np.cos(np.dot(a0.T, self.quantities[None, :])), axis=1)
self.alpha = coeff / np.sum(coeff)
def simulate(self, xc):
# Map the input parameter to a cost for each crop.
weighted_distance = np.sum(self.A * np.square(xc - self.P), axis=1)
f = -np.sum(self.alpha * np.exp(-weighted_distance))
return np.square(f) * np.log(self.quantities)
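  # Note: the A and P constants and the (1.0, 1.2, 3.0, 3.2) base weights above appear to be those of the
  # classic Hartmann 3-D test function, with the alpha mixing weights perturbed per crop quantity.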
class CreateGrid(beam.PTransform):
"""A transform for generating the mapping grid.
Input: Formatted records of the input file, e.g.,
{
'crop': 'OP009',
'quantity': 102,
'transport_costs': [('A', None), ('B', 3), ('C', 8)]
}
Output: tuple (mapping_identifier, {crop -> greenhouse})
"""
class PreGenerateMappings(beam.DoFn):
"""ParDo implementation forming based on two elements a small sub grid.
This facilitates parallellization of the grid generation.
Emits two PCollections: the subgrid represented as collection of lists of
two tuples, and a list of remaining records. Both serve as an input to
GenerateMappings.
"""
def process(self, element):
records = list(element[1])
# Split of 2 crops and pre-generate the subgrid.
# Select the crop with highest number of possible greenhouses:
# in case two crops with only a single possible greenhouse were selected
# the subgrid would consist of only 1 element.
best_split = np.argsort([-len(r['transport_costs']) for r in records])[:2]
rec1 = records[best_split[0]]
rec2 = records[best_split[1]]
# Generate & emit all combinations
for a in rec1['transport_costs']:
if a[1]:
for b in rec2['transport_costs']:
if b[1]:
combination = [(rec1['crop'], a[0]), (rec2['crop'], b[0])]
yield pvalue.TaggedOutput('splitted', combination)
# Pass on remaining records
remaining = [rec for i, rec in enumerate(records) if i not in best_split]
yield pvalue.TaggedOutput('combine', remaining)
class GenerateMappings(beam.DoFn):
"""ParDo implementation to generate all possible mappings.
Input: output of PreGenerateMappings
Output: tuples of the form (mapping_identifier, {crop -> greenhouse})
"""
@staticmethod
def _coordinates_to_greenhouse(coordinates, greenhouses, crops):
# Map the grid coordinates back to greenhouse labels
arr = []
for coord in coordinates:
arr.append(greenhouses[coord])
return dict(zip(crops, np.array(arr)))
def process(self, element, records):
# Generate available greenhouses and grid coordinates for each crop.
grid_coordinates = []
for rec in records:
# Get indices for available greenhouses (w.r.t crops)
filtered = [i for i, av in enumerate(rec['transport_costs']) if av[1]]
grid_coordinates.append(filtered)
# Generate all mappings
grid = np.vstack(list(map(np.ravel, np.meshgrid(*grid_coordinates)))).T
crops = [rec['crop'] for rec in records]
greenhouses = [rec[0] for rec in records[0]['transport_costs']]
for point in grid:
# translate back to greenhouse label
mapping = self._coordinates_to_greenhouse(point, greenhouses, crops)
assert all(rec[0] not in mapping for rec in element)
# include the incomplete mapping of 2 crops
mapping.update(element)
# include identifier
yield (uuid.uuid4().hex, mapping)
def expand(self, records):
o = (
records
| 'pair one' >> beam.Map(lambda x: (1, x))
| 'group all records' >> beam.GroupByKey()
| 'split one of' >> beam.ParDo(self.PreGenerateMappings())
.with_outputs('splitted', 'combine')
)
# Create mappings, and prevent fusion (this limits the parallelization
# in the optimization step)
mappings = (
o.splitted
| 'create mappings' >> beam.ParDo(self.GenerateMappings(),
pvalue.AsSingleton(o.combine))
| 'prevent fusion' >> beam.Reshuffle()
)
return mappings
class OptimizeGrid(beam.PTransform):
"""A transform for optimizing all greenhouses of the mapping grid."""
class CreateOptimizationTasks(beam.DoFn):
"""
Create tasks for optimization.
Input: (mapping_identifier, {crop -> greenhouse})
Output: ((mapping_identifier, greenhouse), [(crop, quantity),...])
"""
def process(self, element, quantities):
mapping_identifier, mapping = element
# Create (crop, quantity) lists for each greenhouse
greenhouses = defaultdict(list)
for crop, greenhouse in mapping.items():
quantity = quantities[crop]
greenhouses[greenhouse].append((crop, quantity))
# Create input for OptimizeProductParameters
for greenhouse, crops in greenhouses.items():
key = (mapping_identifier, greenhouse)
yield (key, crops)
class OptimizeProductParameters(beam.DoFn):
"""Solve the optimization task to determine optimal production parameters.
Input: ((mapping_identifier, greenhouse), [(crop, quantity),...])
Two outputs:
- solution: (mapping_identifier, (greenhouse, [production parameters]))
- costs: (crop, greenhouse, mapping_identifier, cost)
"""
@staticmethod
def _optimize_production_parameters(sim):
# setup initial starting point & bounds
x0 = 0.5 * np.ones(3)
bounds = list(zip(np.zeros(3), np.ones(3)))
# Run L-BFGS-B optimizer
result = minimize(lambda x: np.sum(sim.simulate(x)), x0, bounds=bounds)
return result.x.tolist(), sim.simulate(result.x)
def process(self, element):
mapping_identifier, greenhouse = element[0]
crops, quantities = zip(*element[1])
sim = Simulator(quantities)
optimum, costs = self._optimize_production_parameters(sim)
solution = (mapping_identifier, (greenhouse, optimum))
yield pvalue.TaggedOutput('solution', solution)
for crop, cost, quantity in zip(crops, costs, quantities):
costs = (crop, greenhouse, mapping_identifier, cost * quantity)
yield pvalue.TaggedOutput('costs', costs)
def expand(self, inputs):
mappings, quantities = inputs
opt = (
mappings
| 'optimization tasks' >> beam.ParDo(self.CreateOptimizationTasks(),
pvalue.AsDict(quantities))
| 'optimize' >> beam.ParDo(self.OptimizeProductParameters())
.with_outputs('costs', 'solution')
)
return opt
class CreateTransportData(beam.DoFn):
"""Transform records to pvalues ((crop, greenhouse), transport_cost)"""
def process(self, record):
crop = record['crop']
for greenhouse, transport_cost in record['transport_costs']:
yield ((crop, greenhouse), transport_cost)
def add_transport_costs(element, transport, quantities):
"""Adds the transport cost for the crop to the production cost.
elements are of the form (crop, greenhouse, mapping, cost), the cost only
corresponds to the production cost. Return the same format, but including
the transport cost.
"""
crop = element[0]
cost = element[3]
# lookup & compute cost
transport_key = element[:2]
transport_cost = transport[transport_key] * quantities[crop]
return element[:3] + (cost + transport_cost,)
def parse_input(line):
# Process each line of the input file to a dict representing each crop
# and the transport costs
columns = line.split(',')
# Assign each greenhouse a character
transport_costs = []
for greenhouse, cost in zip(string.ascii_uppercase, columns[2:]):
info = (greenhouse, int(cost) if cost else None)
transport_costs.append(info)
return {
'crop': columns[0],
'quantity': int(columns[1]),
'transport_costs': transport_costs
}
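# For example (values taken from the sample input in the module docstring), parse_input('OP02,30,14,3,12')
# returns {'crop': 'OP02', 'quantity': 30, 'transport_costs': [('A', 14), ('B', 3), ('C', 12)]};
# an empty cost column maps to None, and a 0 keeps that greenhouse out of the grid because
# GenerateMappings only keeps greenhouses with a truthy cost.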
def format_output(element):
"""Transforms the datastructure (unpack lists introduced by CoGroupByKey)
before writing the result to file.
"""
result = element[1]
result['cost'] = result['cost'][0]
result['production'] = dict(result['production'])
result['mapping'] = result['mapping'][0]
return result
def run(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
required=True,
help='Input description to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Parse input file
records = (
p
| 'read' >> beam.io.ReadFromText(known_args.input)
| 'process input' >> beam.Map(parse_input)
)
# Create two pcollections, used as side inputs
transport = (
records
| 'create transport' >> beam.ParDo(CreateTransportData())
)
quantities = (
records
| 'create quantities' >> beam.Map(lambda r: (r['crop'], r['quantity']))
)
# Generate all mappings and optimize greenhouse production parameters
mappings = records | CreateGrid()
opt = (mappings, quantities) | OptimizeGrid()
# Then add the transport costs and sum costs per crop.
costs = (
opt.costs
| 'include transport' >> beam.Map(add_transport_costs,
pvalue.AsDict(transport),
pvalue.AsDict(quantities))
| 'drop crop and greenhouse' >> beam.Map(lambda x: (x[2], x[3]))
| 'aggregate crops' >> beam.CombinePerKey(sum)
)
# Join cost, mapping and production settings solution on mapping identifier.
# Then select best.
join_operands = {
'cost': costs,
'production': opt.solution,
'mapping': mappings
}
best = (
join_operands
| 'join' >> beam.CoGroupByKey()
| 'select best' >> beam.CombineGlobally(min, key=lambda x: x[1]['cost'])
.without_defaults()
| 'format output' >> beam.Map(format_output)
)
# pylint: disable=expression-not-assigned
best | 'write optimum' >> beam.io.WriteToText(known_args.output)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| mxm/incubator-beam | sdks/python/apache_beam/examples/complete/distribopt.py | Python | apache-2.0 | 13,825 |
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from optparse import make_option
class Command(BaseCommand):
"""Create a user.
"""
args = '<name email password>'
help = 'Create a user with specified attributes'
option_list = BaseCommand.option_list + (
make_option('--first',
dest='first_name',
help='user first name'),
make_option('--last',
dest='last_name',
help='user last name'))
def handle(self, *args, **options):
try:
User.objects.get(username=args[0])
except User.DoesNotExist:
user = User.objects.create_user(*args)
if options['first_name']:
user.first_name = options['first_name']
if options['last_name']:
user.last_name = options['last_name']
user.save()
print('User {0} created'.format(args[0]))
else:
print('User {0} already exists'.format(args[0]))
| orontee/porte-monnaie | site/tracker/management/commands/createuser.py | Python | gpl-3.0 | 1,075 |
import pymysql
from flask_restful import Resource
from flask import abort
ALLOWED_SHOW = ('processlist', 'databases', 'plugins', 'privileges')
class Mysql(Resource):
def __init__(self):
self.connection = pymysql.connect(user='root')
self.cursor = self.connection.cursor()
def _execute(self, sql):
self.cursor.execute(sql)
desc_id = tuple(x[0] for x in self.cursor.description)
query_result = self.cursor.fetchall()
results = [dict(zip(desc_id, item)) for item in query_result]
return results
def get(self, cmd):
if cmd in ALLOWED_SHOW:
return self._execute('show ' + cmd)
else:
abort(404)
class MysqlDatabase(Mysql):
def get(self, dbname):
try:
self.connection.select_db(dbname)
except pymysql.InternalError as e:
abort(400, e.args)
return self._execute('show tables')
def post(self, dbname):
try:
self.cursor.execute('create database ' + dbname)
except pymysql.ProgrammingError as e:
abort(400, e.args)
def delete(self, dbname):
try:
self.cursor.execute('drop database if exists ' + dbname)
except pymysql.ProgrammingError as e:
abort(400, e.args)
| natict/roomservice | roomservice/mysql.py | Python | mit | 1,306 |
#!/usr/bin/env python
# encoding: utf-8
"""
Reboot a droplet.
"""
import pyocean
import time
import os
# Put your DigitalOcean access token here or set from environment variables
ACCESS_TOKEN = '' or os.getenv('ACCESS_TOKEN')
DROPLET_ID = '' or os.getenv('DROPLET_ID')
try:
digitalocean = pyocean.DigitalOcean(ACCESS_TOKEN)
droplet = digitalocean.droplet.get(DROPLET_ID)
print("Rebooting droplet '%s'..." % droplet.name)
droplet.reboot()
except pyocean.exceptions.DOException as e:
print('Reboot failed: %s' % e)
else:
print('done.')
| fxdgear/pyocean | examples/droplet-reboot.py | Python | mit | 562 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.http_routing.models.ir_http import slugify
from odoo import api, fields, models
class Page(models.Model):
_name = 'website.page'
_inherits = {'ir.ui.view': 'view_id'}
_inherit = 'website.published.multi.mixin'
_description = 'Page'
_order = 'website_id'
url = fields.Char('Page URL')
view_id = fields.Many2one('ir.ui.view', string='View', required=True, ondelete="cascade")
website_indexed = fields.Boolean('Is Indexed', default=True)
date_publish = fields.Datetime('Publishing Date')
# This is needed to be able to display if page is a menu in /website/pages
menu_ids = fields.One2many('website.menu', 'page_id', 'Related Menus')
is_homepage = fields.Boolean(compute='_compute_homepage', inverse='_set_homepage', string='Homepage')
is_visible = fields.Boolean(compute='_compute_visible', string='Is Visible')
# Page options
header_overlay = fields.Boolean()
header_color = fields.Char()
# don't use mixin website_id but use website_id on ir.ui.view instead
website_id = fields.Many2one(related='view_id.website_id', store=True, readonly=False, ondelete='cascade')
arch = fields.Text(related='view_id.arch', readonly=False, depends_context=('website_id',))
def _compute_homepage(self):
for page in self:
page.is_homepage = page == self.env['website'].get_current_website().homepage_id
def _set_homepage(self):
for page in self:
website = self.env['website'].get_current_website()
if page.is_homepage:
if website.homepage_id != page:
website.write({'homepage_id': page.id})
else:
if website.homepage_id == page:
website.write({'homepage_id': None})
def _compute_visible(self):
for page in self:
page.is_visible = page.website_published and (
not page.date_publish or page.date_publish < fields.Datetime.now()
)
def _is_most_specific_page(self, page_to_test):
'''This will test if page_to_test is the most specific page in self.'''
pages_for_url = self.sorted(key=lambda p: not p.website_id).filtered(lambda page: page.url == page_to_test.url)
# this works because pages are _order'ed by website_id
most_specific_page = pages_for_url[0]
return most_specific_page == page_to_test
def get_page_properties(self):
self.ensure_one()
res = self.read([
'id', 'view_id', 'name', 'url', 'website_published', 'website_indexed', 'date_publish',
'menu_ids', 'is_homepage', 'website_id', 'visibility', 'groups_id'
])[0]
if not res['groups_id']:
res['group_id'] = self.env.ref('base.group_user').name_get()[0]
elif len(res['groups_id']) == 1:
res['group_id'] = self.env['res.groups'].browse(res['groups_id']).name_get()[0]
del res['groups_id']
res['visibility_password'] = res['visibility'] == 'password' and self.visibility_password_display or ''
return res
@api.model
def save_page_info(self, website_id, data):
website = self.env['website'].browse(website_id)
page = self.browse(int(data['id']))
# If URL has been edited, slug it
original_url = page.url
url = data['url']
if not url.startswith('/'):
url = '/' + url
if page.url != url:
url = '/' + slugify(url, max_length=1024, path=True)
url = self.env['website'].get_unique_path(url)
# If name has changed, check for key uniqueness
if page.name != data['name']:
page_key = self.env['website'].get_unique_key(slugify(data['name']))
else:
page_key = page.key
menu = self.env['website.menu'].search([('page_id', '=', int(data['id']))])
if not data['is_menu']:
# If the page is no longer in menu, we should remove its website_menu
if menu:
menu.unlink()
else:
# The page is now a menu, check if has already one
if menu:
menu.write({'url': url})
else:
self.env['website.menu'].create({
'name': data['name'],
'url': url,
'page_id': data['id'],
'parent_id': website.menu_id.id,
'website_id': website.id,
})
# Edits via the page manager shouldn't trigger the COW
# mechanism and generate new pages. The user manages page
# visibility manually with is_published here.
w_vals = {
'key': page_key,
'name': data['name'],
'url': url,
'is_published': data['website_published'],
'website_indexed': data['website_indexed'],
'date_publish': data['date_publish'] or None,
'is_homepage': data['is_homepage'],
'visibility': data['visibility'],
}
if page.visibility == 'restricted_group' and data['visibility'] != "restricted_group":
w_vals['groups_id'] = False
elif 'group_id' in data:
w_vals['groups_id'] = [data['group_id']]
if 'visibility_pwd' in data:
w_vals['visibility_password_display'] = data['visibility_pwd'] or ''
page.with_context(no_cow=True).write(w_vals)
# Create redirect if needed
if data['create_redirect']:
self.env['website.rewrite'].create({
'name': data['name'],
'redirect_type': data['redirect_type'],
'url_from': original_url,
'url_to': url,
'website_id': website.id,
})
return url
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if default:
if not default.get('view_id'):
view = self.env['ir.ui.view'].browse(self.view_id.id)
new_view = view.copy({'website_id': default.get('website_id')})
default['view_id'] = new_view.id
default['url'] = default.get('url', self.env['website'].get_unique_path(self.url))
return super(Page, self).copy(default=default)
@api.model
def clone_page(self, page_id, page_name=None, clone_menu=True):
""" Clone a page, given its identifier
:param page_id : website.page identifier
"""
page = self.browse(int(page_id))
copy_param = dict(name=page_name or page.name, website_id=self.env['website'].get_current_website().id)
if page_name:
page_url = '/' + slugify(page_name, max_length=1024, path=True)
copy_param['url'] = self.env['website'].get_unique_path(page_url)
new_page = page.copy(copy_param)
# Should not clone menu if the page was cloned from one website to another
# Eg: Cloning a generic page (no website) will create a page with a website, we can't clone menu (not same container)
if clone_menu and new_page.website_id == page.website_id:
menu = self.env['website.menu'].search([('page_id', '=', page_id)], limit=1)
if menu:
# If the page being cloned has a menu, clone it too
menu.copy({'url': new_page.url, 'name': new_page.name, 'page_id': new_page.id})
return new_page.url + '?enable_editor=1'
def unlink(self):
# When a website_page is deleted, the ORM does not delete its
# ir_ui_view. So we got to delete it ourself, but only if the
# ir_ui_view is not used by another website_page.
for page in self:
# Other pages linked to the ir_ui_view of the page being deleted (will it even be possible?)
pages_linked_to_iruiview = self.search(
[('view_id', '=', page.view_id.id), ('id', '!=', page.id)]
)
if not pages_linked_to_iruiview and not page.view_id.inherit_children_ids:
# If there is no other pages linked to that ir_ui_view, we can delete the ir_ui_view
page.view_id.unlink()
return super(Page, self).unlink()
def write(self, vals):
if 'url' in vals and not vals['url'].startswith('/'):
vals['url'] = '/' + vals['url']
return super(Page, self).write(vals)
def get_website_meta(self):
self.ensure_one()
return self.view_id.get_website_meta()
| ygol/odoo | addons/website/models/website_page.py | Python | agpl-3.0 | 8,632 |
import threading
import time
import random
SLEEP = True
counter = 0
# Making race conditions visible by introducing sleep time periods
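# Each worker reads counter, sleeps, then writes old_counter + 1, so concurrent workers can
# overwrite each other's increment (a lost update). A minimal sketch of a fix (not applied here,
# since the point of the demo is to show the race) would guard the read-modify-write with a lock:
#   counter_lock = threading.Lock()
#   with counter_lock:
#       counter += 1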
def sleep_for_a_while():
if SLEEP:
time.sleep(random.random())
def worker():
global counter
sleep_for_a_while()
old_counter = counter
sleep_for_a_while()
counter = old_counter + 1
sleep_for_a_while()
print("Counter: %d" % counter, end='')
sleep_for_a_while()
print()
sleep_for_a_while()
print("---------------------", end='')
sleep_for_a_while()
print()
print("Work started!")
sleep_for_a_while()
for t in range(10):
threading.Thread(target=worker).start()
sleep_for_a_while()
sleep_for_a_while()
print("Work finished!")
sleep_for_a_while() | rcanepa/cs-fundamentals | python/multiprocessing_multithreading/basic_threading.py | Python | mit | 755 |
# hard-coded database parameters: aren't we doing something bad here?
# should fix it later
def create_surrounding_hex(x, y, hexagon_size):
import pymongo
client = pymongo.MongoClient('candygram',27017)
db = client.map
hex_tiles_search = db.hex_tiles
hexagon_segment_one = hexagon_size / 4
hexagon_segment_two = hexagon_segment_one * 2
center_hex_x = x
center_hex_y = y
# need to check if a hex exists on each side of poly
    # if it doesn't, create it
# top
tcx = center_hex_x
tcy = center_hex_y + hexagon_segment_two + hexagon_segment_two
top_hex_search = hex_tiles_search.find_one({"$and": [{"centerX": tcx}, {"centerY": tcy}]})
if top_hex_search is None:
print("create top")
top_hex(x, y, hexagon_size)
# top right
trcx = center_hex_x + hexagon_segment_two + hexagon_segment_one
trcy = center_hex_y + hexagon_segment_two
topr_hex_search = hex_tiles_search.find_one({"$and": [{"centerX": trcx}, {"centerY": trcy}]})
if topr_hex_search is None:
print("create top right")
top_right_hex(x, y, hexagon_size)
# top left
tlcx = center_hex_x - hexagon_segment_two - hexagon_segment_one
tlcy = center_hex_y + hexagon_segment_two
topl_hex_search = hex_tiles_search.find_one({"$and": [{"centerX": tlcx}, {"centerY": tlcy}]})
if topl_hex_search is None:
print("create top left")
top_left_hex(x ,y, hexagon_size)
# bottom
bcx = center_hex_x
bcy = center_hex_y - hexagon_segment_two - hexagon_segment_two
bottom_hex_search = hex_tiles_search.find_one({"$and": [{"centerX": bcx}, {"centerY": bcy}]})
if bottom_hex_search is None:
print("create bottom")
bottom_hex(x, y, hexagon_size)
# bottom right
brcx = center_hex_x + hexagon_segment_two + hexagon_segment_one
brcy = center_hex_y - hexagon_segment_two
bottomr_hex_search = hex_tiles_search.find_one({"$and": [{"centerX": brcx}, {"centerY": brcy}]})
if bottomr_hex_search is None:
print("create bottom right")
bottom_right_hex(x, y, hexagon_size)
# bottom left
blcx = center_hex_x - hexagon_segment_two - hexagon_segment_one
blcy = center_hex_y - hexagon_segment_two
bottoml_hex_search = hex_tiles_search.find_one({"$and": [{"centerX": blcx}, {"centerY": blcy}]})
if bottoml_hex_search is None:
print("create bottom left")
bottom_left_hex(x, y, hexagon_size)
def top_hex(cx, cy, hexagon_size):
import pymongo
import random
import datetime
print("create hexagon on top of source point")
hexagon_segment_one = hexagon_size / 4
hexagon_segment_two = hexagon_segment_one * 2
# create hexagon top
newcx = cx
newcy = cy + hexagon_segment_two + hexagon_segment_two
X1 = newcx - hexagon_segment_two
Y1 = newcy
X2 = newcx - hexagon_segment_one
Y2 = newcy - hexagon_segment_two
X3 = newcx + hexagon_segment_one
Y3 = newcy - hexagon_segment_two
X4 = newcx + hexagon_segment_two
Y4 = newcy
X5 = newcx + hexagon_segment_one
Y5 = newcy + hexagon_segment_two
X6 = newcx - hexagon_segment_one
Y6 = newcy + hexagon_segment_two
X7 = newcx - hexagon_segment_two
Y7 = newcy
# what is its center?
centerX = (X1 + X2 + X3 + X4 + X5 + X6) / 6
centerY = (Y1+Y2+Y3+Y4+Y5+Y6)/6
#starting water value
rand_water = random.randint(0, 100)
#starting grass value
rand_grass = random.randint(0, 100)
hex1 = {
"loc" :
{
"type": "Polygon",
"coordinates": [ [ [ X1 , Y1 ] , [ X2 , Y2 ] , [ X3 , Y3 ] , [ X4 , Y4 ] , [X5 , Y5] , [ X6 , Y6 ] , [ X7 , Y7 ] ] ]
},
"centerXY": [centerX, centerY],
"centerX": centerX,
"centerY": centerY,
"hexcp1": [X1, Y1],
"hexcp2": [X2, Y2],
"hexcp3": [X3, Y3],
"hexcp4": [X4, Y4],
"hexcp5": [X5, Y5],
"hexcp6": [X6, Y6],
"hexcp7": [X7, Y7],
"Water": rand_water,
"Grass": rand_grass,
"Created": datetime.datetime.utcnow()
}
client = pymongo.MongoClient('candygram',27017)
db = client.map
hex_insert_collection = db.hex_tiles
new_tile_id = hex_insert_collection.insert(hex1)
return new_tile_id
def top_right_hex(cx, cy, hexagon_size):
import pymongo
import random
import datetime
# create hexagon top right
hexagon_segment_one = hexagon_size/4
hexagon_segment_two = hexagon_segment_one * 2
newcx = cx + hexagon_segment_two + hexagon_segment_one
newcy = cy + hexagon_segment_two
X1 = newcx - hexagon_segment_two
Y1 = newcy
X2 = newcx - hexagon_segment_one
Y2 = newcy - hexagon_segment_two
X3 = newcx + hexagon_segment_one
Y3 = newcy - hexagon_segment_two
X4 = newcx + hexagon_segment_two
Y4 = newcy
X5 = newcx + hexagon_segment_one
Y5 = newcy + hexagon_segment_two
X6 = newcx - hexagon_segment_one
Y6 = newcy + hexagon_segment_two
X7 = newcx - hexagon_segment_two
Y7 = newcy
centerX = (X1+X2+X3+X4+X5+X6)/6
centerY = (Y1+Y2+Y3+Y4+Y5+Y6)/6
#starting water value
rand_water = random.randint(0, 100)
#starting grass value
rand_grass = random.randint(0, 100)
hex1 = {
"loc" :
{
"type": "Polygon",
"coordinates": [ [ [ X1 , Y1 ] , [ X2 , Y2 ] , [ X3 , Y3 ] , [ X4 , Y4 ] , [X5 , Y5] , [ X6 , Y6 ] , [ X7 , Y7 ] ] ]
},
"centerXY": [centerX, centerY],
"centerX": centerX,
"centerY": centerY,
"hexcp1": [X1, Y1],
"hexcp2": [X2, Y2],
"hexcp3": [X3, Y3],
"hexcp4": [X4, Y4],
"hexcp5": [X5, Y5],
"hexcp6": [X6, Y6],
"hexcp7": [X7, Y7],
"Water": rand_water,
"Grass": rand_grass,
"Created": datetime.datetime.utcnow()
}
client = pymongo.MongoClient('candygram',27017)
db = client.map
hex_insert_collection = db.hex_tiles
new_tile_id = hex_insert_collection.insert(hex1)
return new_tile_id
def top_left_hex(cx, cy, hexagon_size):
import pymongo
import random
import datetime
print("create hexagon on top left of source point")
# create hexagon top left
hexagon_segment_one = hexagon_size/4
hexagon_segment_two = hexagon_segment_one * 2
newcx = cx - hexagon_segment_two - hexagon_segment_one
newcy = cy + hexagon_segment_two
X1 = newcx - hexagon_segment_two
Y1 = newcy
X2 = newcx - hexagon_segment_one
Y2 = newcy - hexagon_segment_two
X3 = newcx + hexagon_segment_one
Y3 = newcy - hexagon_segment_two
X4 = newcx + hexagon_segment_two
Y4 = newcy
X5 = newcx + hexagon_segment_one
Y5 = newcy + hexagon_segment_two
X6 = newcx - hexagon_segment_one
Y6 = newcy + hexagon_segment_two
X7 = newcx - hexagon_segment_two
Y7 = newcy
# what is its center?
centerX = (X1+X2+X3+X4+X5+X6)/6
centerY = (Y1+Y2+Y3+Y4+Y5+Y6)/6
#starting water value
rand_water = random.randint(0, 100)
#starting grass value
rand_grass = random.randint(0, 100)
hex1 = {
"loc" :
{
"type": "Polygon",
"coordinates": [ [ [ X1 , Y1 ] , [ X2 , Y2 ] , [ X3 , Y3 ] , [ X4 , Y4 ] , [X5 , Y5] , [ X6 , Y6 ] , [ X7 , Y7 ] ] ]
},
"centerXY": [centerX, centerY],
"centerX": centerX,
"centerY": centerY,
"hexcp1": [X1, Y1],
"hexcp2": [X2, Y2],
"hexcp3": [X3, Y3],
"hexcp4": [X4, Y4],
"hexcp5": [X5, Y5],
"hexcp6": [X6, Y6],
"hexcp7": [X7, Y7],
"Water": rand_water,
"Grass": rand_grass,
"Created": datetime.datetime.utcnow()
}
client = pymongo.MongoClient('candygram',27017)
db = client.map
hex_insert_collection = db.hex_tiles
new_tile_id = hex_insert_collection.insert(hex1)
return new_tile_id
def bottom_hex(cx, cy ,hexagon_size):
import pymongo
import random
import datetime
print("create hexagon on bottom of source point")
# create hexagon bottom
hexagon_segment_one = hexagon_size/4
hexagon_segment_two = hexagon_segment_one * 2
newcx = cx
newcy = cy - hexagon_segment_two - hexagon_segment_two
X1 = newcx - hexagon_segment_two
Y1 = newcy
X2 = newcx - hexagon_segment_one
Y2 = newcy - hexagon_segment_two
X3 = newcx + hexagon_segment_one
Y3 = newcy - hexagon_segment_two
X4 = newcx + hexagon_segment_two
Y4 = newcy
X5 = newcx + hexagon_segment_one
Y5 = newcy + hexagon_segment_two
X6 = newcx - hexagon_segment_one
Y6 = newcy + hexagon_segment_two
X7 = newcx - hexagon_segment_two
Y7 = newcy
# what is its center?
centerX = (X1+X2+X3+X4+X5+X6)/6
centerY = (Y1+Y2+Y3+Y4+Y5+Y6)/6
#starting water value
rand_water = random.randint(0, 100)
#starting grass value
rand_grass = random.randint(0, 100)
hex1 = {
"loc" :
{
"type": "Polygon",
"coordinates": [ [ [ X1 , Y1 ] , [ X2 , Y2 ] , [ X3 , Y3 ] , [ X4 , Y4 ] , [X5 , Y5] , [ X6 , Y6 ] , [ X7 , Y7 ] ] ]
},
"centerXY": [centerX, centerY],
"centerX": centerX,
"centerY": centerY,
"hexcp1": [X1, Y1],
"hexcp2": [X2, Y2],
"hexcp3": [X3, Y3],
"hexcp4": [X4, Y4],
"hexcp5": [X5, Y5],
"hexcp6": [X6, Y6],
"hexcp7": [X7, Y7],
"Water": rand_water,
"Grass": rand_grass,
"Created": datetime.datetime.utcnow()
}
client = pymongo.MongoClient('candygram',27017)
db = client.map
hex_insert_collection = db.hex_tiles
new_tile_id = hex_insert_collection.insert(hex1)
return new_tile_id
def bottom_right_hex(cx, cy, hexagon_size):
import pymongo
import random
import datetime
print("create hexagon on bottom right from source point")
# create hexagon to bottom right
hexagon_segment_one = hexagon_size/4
hexagon_segment_two = hexagon_segment_one * 2
newcx = cx + hexagon_segment_two + hexagon_segment_one
newcy = cy - hexagon_segment_two
X1 = newcx - hexagon_segment_two
Y1 = newcy
X2 = newcx - hexagon_segment_one
Y2 = newcy - hexagon_segment_two
X3 = newcx + hexagon_segment_one
Y3 = newcy - hexagon_segment_two
X4 = newcx + hexagon_segment_two
Y4 = newcy
X5 = newcx + hexagon_segment_one
Y5 = newcy + hexagon_segment_two
X6 = newcx - hexagon_segment_one
Y6 = newcy + hexagon_segment_two
X7 = newcx - hexagon_segment_two
Y7 = newcy
# what is its center?
centerX = (X1+X2+X3+X4+X5+X6)/6
centerY = (Y1+Y2+Y3+Y4+Y5+Y6)/6
#starting water value
rand_water = random.randint(0, 100)
#starting grass value
rand_grass = random.randint(0, 100)
hex1 = {
"loc" :
{
"type": "Polygon",
"coordinates": [ [ [ X1 , Y1 ] , [ X2 , Y2 ] , [ X3 , Y3 ] , [ X4 , Y4 ] , [X5 , Y5] , [ X6 , Y6 ] , [ X7 , Y7 ] ] ]
},
"centerXY": [centerX, centerY],
"centerX": centerX,
"centerY": centerY,
"hexcp1": [X1, Y1],
"hexcp2": [X2, Y2],
"hexcp3": [X3, Y3],
"hexcp4": [X4, Y4],
"hexcp5": [X5, Y5],
"hexcp6": [X6, Y6],
"hexcp7": [X7, Y7],
"Water": rand_water,
"Grass": rand_grass,
"Created": datetime.datetime.utcnow()
}
client = pymongo.MongoClient('candygram',27017)
db = client.map
hex_insert_collection = db.hex_tiles
new_tile_id = hex_insert_collection.insert(hex1)
return new_tile_id
def bottom_left_hex(cx, cy, hexagon_size):
import pymongo
import random
import datetime
print("create hexagon on bottom left from source point")
# create hexagon bottom left
hexagon_segment_one = hexagon_size/4
hexagon_segment_two = hexagon_segment_one * 2
newcx = cx - hexagon_segment_two - hexagon_segment_one
newcy = cy - hexagon_segment_two
X1 = newcx - hexagon_segment_two
Y1 = newcy
X2 = newcx - hexagon_segment_one
Y2 = newcy - hexagon_segment_two
X3 = newcx + hexagon_segment_one
Y3 = newcy - hexagon_segment_two
X4 = newcx + hexagon_segment_two
Y4 = newcy
X5 = newcx + hexagon_segment_one
Y5 = newcy + hexagon_segment_two
X6 = newcx - hexagon_segment_one
Y6 = newcy + hexagon_segment_two
X7 = newcx - hexagon_segment_two
Y7 = newcy
# what is its center?
centerX = (X1+X2+X3+X4+X5+X6)/6
centerY = (Y1+Y2+Y3+Y4+Y5+Y6)/6
#starting water value
rand_water = random.randint(0, 100)
#starting grass value
rand_grass = random.randint(0, 100)
hex1 = {
"loc" :
{
"type": "Polygon",
"coordinates": [ [ [ X1 , Y1 ] , [ X2 , Y2 ] , [ X3 , Y3 ] , [ X4 , Y4 ] , [X5 , Y5] , [ X6 , Y6 ] , [ X7 , Y7 ] ] ]
},
"centerXY": [centerX, centerY],
"centerX": centerX,
"centerY": centerY,
"hexcp1": [X1, Y1],
"hexcp2": [X2, Y2],
"hexcp3": [X3, Y3],
"hexcp4": [X4, Y4],
"hexcp5": [X5, Y5],
"hexcp6": [X6, Y6],
"hexcp7": [X7, Y7],
"Water": rand_water,
"Grass": rand_grass,
"Created": datetime.datetime.utcnow()
}
client = pymongo.MongoClient('candygram',27017)
db = client.map
hex_insert_collection = db.hex_tiles
new_tile_id = hex_insert_collection.insert(hex1)
return new_tile_id
def hex_on_point(cx, cy, hexagon_size):
import pymongo
import random
import datetime
print("create hexagon on point")
hexagon_segment_one = hexagon_size/4
hexagon_segment_two = hexagon_segment_one * 2
newcx = cx
newcy = cy
X1 = newcx - hexagon_segment_two
Y1 = newcy
X2 = newcx - hexagon_segment_one
Y2 = newcy - hexagon_segment_two
X3 = newcx + hexagon_segment_one
Y3 = newcy - hexagon_segment_two
X4 = newcx + hexagon_segment_two
Y4 = newcy
X5 = newcx + hexagon_segment_one
Y5 = newcy + hexagon_segment_two
X6 = newcx - hexagon_segment_one
Y6 = newcy + hexagon_segment_two
    # don't forget to include the 7th point to close the polygon
X7 = newcx - hexagon_segment_two
Y7 = newcy
# what is its center?
centerX = (X1+X2+X3+X4+X5+X6)/6
centerY = (Y1+Y2+Y3+Y4+Y5+Y6)/6
#starting water value
rand_water = random.randint(0, 100)
#starting grass value
rand_grass = random.randint(0, 100)
hex1 = {
"loc" :
{
"type": "Polygon",
"coordinates": [ [ [ X1 , Y1 ] , [ X2 , Y2 ] , [ X3 , Y3 ] , [ X4 , Y4 ] , [X5 , Y5] , [ X6 , Y6 ] , [ X7 , Y7 ] ] ]
},
"centerXY": [centerX, centerY],
"centerX": centerX,
"centerY": centerY,
"hexcp1": [X1, Y1],
"hexcp2": [X2, Y2],
"hexcp3": [X3, Y3],
"hexcp4": [X4, Y4],
"hexcp5": [X5, Y5],
"hexcp6": [X6, Y6],
"hexcp7": [X7, Y7],
"Water": rand_water,
"Grass": rand_grass,
"Created": datetime.datetime.utcnow()
}
client = pymongo.MongoClient('candygram',27017)
db = client.map
hex_insert_collection = db.hex_tiles
new_tile_id = hex_insert_collection.insert(hex1)
return new_tile_id
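# Note: the six neighbour helpers above (top_hex ... bottom_left_hex) differ from hex_on_point only in their
# log message and the centre offset they apply before building the same vertex list and document.
# A possible consolidation (a sketch, not used by the code above; offsets read off the functions above,
# in multiples of hexagon_size):
#   NEIGHBOUR_OFFSETS = {'top': (0, 1.0), 'bottom': (0, -1.0),
#                        'top_right': (0.75, 0.5), 'top_left': (-0.75, 0.5),
#                        'bottom_right': (0.75, -0.5), 'bottom_left': (-0.75, -0.5)}
#   def neighbour_hex(cx, cy, hexagon_size, direction):
#       dx, dy = NEIGHBOUR_OFFSETS[direction]
#       return hex_on_point(cx + dx * hexagon_size, cy + dy * hexagon_size, hexagon_size)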
| erictheitguy/hexgenelife | theory/create_hex.py | Python | gpl-3.0 | 15,343 |
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from django.utils import formats
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, EventAdmin,
FilteredChildAdmin, GroupAdmin, InvitationAdmin,
NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin, SwallowAdmin,
site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
def build_tbody_html(pk, href, extra_fields):
return (
'<tbody><tr class="row1">'
'<td class="action-checkbox">'
'<input type="checkbox" name="_selected_action" value="{}" '
'class="action-select" /></td>'
'<th class="field-name"><a href="{}">name</a></th>'
'{}</tr></tbody>'
).format(pk, href, extra_fields)
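# For reference, build_tbody_html(1, '/link/', '<td>x</td>') produces (concatenated):
# '<tbody><tr class="row1"><td class="action-checkbox">'
# '<input type="checkbox" name="_selected_action" value="1" class="action-select" /></td>'
# '<th class="field-name"><a href="/link/">name</a></th><td>x</td></tr></tbody>'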
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create_superuser(username=username, email='[email protected]', password='xxx')
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get('/child/')
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {'parent': {}})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ia.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ia.get_changelist_instance(request)
self.assertIs(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ia.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">-</td>')
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
"""
Empty value display can be set on AdminSite.
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
# Set a new empty display value on AdminSite.
admin.site.empty_value_display = '???'
m = ChildAdmin(Child, admin.site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">???</td>')
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_in_model_admin(self):
"""
Empty value display can be set in ModelAdmin or individual fields.
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = EmptyValueChildAdmin(Child, admin.site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = build_tbody_html(
new_child.id,
link,
'<td class="field-age_display">&dagger;</td>'
'<td class="field-age">-empty-</td>'
)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
        Inclusion tag result_list generates a table with the default
        ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">%s</td>' % new_parent)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
        Regression test for #11791: the result_list inclusion tag generates a
        table and the items are nested within the table element tags.
        Also a regression test for #13599: hidden fields are rendered in a
        div outside the table when list_editable is enabled.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = m.get_changelist_instance(request)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = (
'<div class="hiddenfields">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" />'
'</div>'
) % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = (
'<input name="form-0-name" value="name" class="vTextField" '
'maxlength="30" type="text" id="id_form-0-name" />'
)
self.assertInHTML(
'<td class="field-name">%s</td>' % editable_name_field,
table_output,
msg_prefix='Failed to find "name" list_editable field',
)
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
with self.assertRaises(IncorrectLookupParameters):
m.get_changelist_instance(request)
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
"""
When using a ManyToMany in list_filter at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={'group__members': lead.pk})
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Concert instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. The model managed in the
        admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, custom_site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, custom_site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = m.get_changelist_instance(request)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
        Regression tests for #15819: if a field listed in list_filter
        is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = m.get_changelist_instance(request)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
        Regression tests for #15819: if a field listed in search_fields
        is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = m.get_changelist_instance(request)
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
"""
When using a ManyToMany in search_fields at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
cl = m.get_changelist_instance(request)
# There's only one Concert instance
self.assertEqual(cl.queryset.count(), 1)
def test_pk_in_search_fields(self):
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
m = ConcertAdmin(Concert, custom_site)
m.search_fields = ['group__pk']
request = self.factory.get('/concert/', data={SEARCH_VAR: band.pk})
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 1)
request = self.factory.get('/concert/', data={SEARCH_VAR: band.pk + 5})
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 0)
def test_no_distinct_for_m2m_in_list_filter_without_params(self):
"""
If a ManyToManyField is in list_filter but isn't in any lookup params,
the changelist's query shouldn't have distinct.
"""
m = BandAdmin(Band, custom_site)
for lookup_params in ({}, {'name': 'test'}):
request = self.factory.get('/band/', lookup_params)
cl = m.get_changelist_instance(request)
self.assertFalse(cl.queryset.query.distinct)
# A ManyToManyField in params does have distinct applied.
request = self.factory.get('/band/', {'genres': '0'})
cl = m.get_changelist_instance(request)
self.assertTrue(cl.queryset.query.distinct)
def test_pagination(self):
"""
        Regression tests for #12893: pagination in the admin's changelist
        doesn't use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
superuser = User.objects.create_superuser(username='super', email='super@localhost', password='secret')
self.client.force_login(superuser)
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, str(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, custom_site)
m.list_max_show_all = 200
# 200 is the max we'll pass to ChangeList
cl = m.get_changelist_instance(request)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, custom_site)
m.list_max_show_all = 30
# 30 is the max we'll pass to ChangeList for this test
cl = m.get_changelist_instance(request)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
model_admin = SwallowAdmin(Swallow, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, str(swallow.origin))
self.assertContains(response, str(swallow.load))
self.assertContains(response, str(swallow.speed))
# Reverse one-to-one relations should work.
self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_multiuser_edit(self):
"""
Simultaneous edits of list_editable fields on the changelist by
different users must not result in one user's edits creating a new
object instead of modifying the correct existing object (#11313).
"""
# To replicate this issue, simulate the following steps:
# 1. User1 opens an admin changelist with list_editable fields.
# 2. User2 edits object "Foo" such that it moves to another page in
# the pagination order and saves.
# 3. User1 edits object "Foo" and saves.
# 4. The edit made by User1 does not get applied to object "Foo" but
# instead is used to create a new object (bug).
# For this test, order the changelist by the 'speed' attribute and
# display 3 objects per page (SwallowAdmin.list_per_page = 3).
# Setup the test to reflect the DB state after step 2 where User2 has
# edited the first swallow object's speed from '4' to '1'.
a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
b = Swallow.objects.create(origin='Swallow B', load=2, speed=2)
c = Swallow.objects.create(origin='Swallow C', load=5, speed=5)
d = Swallow.objects.create(origin='Swallow D', load=9, speed=9)
superuser = self._create_superuser('superuser')
self.client.force_login(superuser)
changelist_url = reverse('admin:admin_changelist_swallow_changelist')
# Send the POST from User1 for step 3. It's still using the changelist
# ordering from before User2's edits in step 2.
data = {
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '3',
'form-MIN_NUM_FORMS': '0',
'form-MAX_NUM_FORMS': '1000',
'form-0-id': str(d.pk),
'form-1-id': str(c.pk),
'form-2-id': str(a.pk),
'form-0-load': '9.0',
'form-0-speed': '9.0',
'form-1-load': '5.0',
'form-1-speed': '5.0',
'form-2-load': '5.0',
'form-2-speed': '4.0',
'_save': 'Save',
}
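        # The form-TOTAL_FORMS/form-INITIAL_FORMS/... keys are Django's standard
        # formset management-form fields; the per-row ids above still reflect the
        # ordering User1 saw before User2's edit.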
response = self.client.post(changelist_url, data, follow=True, extra={'o': '-2'})
# The object User1 edited in step 3 is displayed on the changelist and
# has the correct edits applied.
self.assertContains(response, '1 swallow was changed successfully.')
self.assertContains(response, a.origin)
a.refresh_from_db()
self.assertEqual(a.load, float(data['form-2-load']))
self.assertEqual(a.speed, float(data['form-2-speed']))
b.refresh_from_db()
self.assertEqual(b.load, 2)
self.assertEqual(b.speed, 2)
c.refresh_from_db()
self.assertEqual(c.load, float(data['form-1-load']))
self.assertEqual(c.speed, float(data['form-1-speed']))
d.refresh_from_db()
self.assertEqual(d.load, float(data['form-0-load']))
self.assertEqual(d.speed, float(data['form-0-speed']))
# No new swallows were created.
self.assertEqual(len(Swallow.objects.all()), 4)
def test_deterministic_order_for_unordered_model(self):
"""
The primary key is used in the ordering of the changelist's results to
guarantee a deterministic order, even when the model doesn't have any
default ordering defined (#17198).
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
The primary key is used in the ordering of the changelist's results to
guarantee a deterministic order, even when the model has a manager that
defines a default ordering (#17198).
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
        Regression tests for ticket #15653: ensure the number of pages
        generated for changelist views is correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/')
cl = m.get_changelist_instance(request)
per_page = cl.list_per_page = 10
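        # Each tuple below is (current page number, total object count, expected
        # page range); the '.' entries stand for the DOT placeholder the admin
        # pagination tag emits where page numbers are elided.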
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertEqual(expected_page_range, list(real_page_range))
def test_object_tools_displayed_no_add_permission(self):
"""
When ModelAdmin.has_add_permission() returns False, the object-tools
block is still shown.
"""
superuser = self._create_superuser('superuser')
m = EventAdmin(Event, custom_site)
request = self._mocked_authenticated_request('/event/', superuser)
self.assertFalse(m.has_add_permission(request))
response = m.changelist_view(request)
self.assertIn('<ul class="object-tools">', response.rendered_content)
# The "Add" button inside the object-tools shouldn't appear.
self.assertNotIn('Add ', response.rendered_content)
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be u'' since this templatetag just logs,
# it doesn't render any string.
self.assertEqual(template.render(context), '')
def test_get_admin_log_templatetag_no_user(self):
"""
The {% get_admin_log %} tag should work without specifying a user.
"""
user = User(username='jondoe', password='secret', email='[email protected]')
user.save()
ct = ContentType.objects.get_for_model(User)
LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
t = Template(
'{% load log %}'
'{% get_admin_log 100 as admin_log %}'
'{% for entry in admin_log %}'
'{{ entry|safe }}'
'{% endfor %}'
)
self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
@override_settings(ROOT_URLCONF='admin_changelist.urls')
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumTestCase.available_apps
def setUp(self):
User.objects.create_superuser(username='super', password='secret', email=None)
def test_add_row_selection(self):
"""
The status line for selected rows gets updated correctly (#22038).
"""
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:auth_user_changelist'))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
| edmorley/django | tests/admin_changelist/tests.py | Python | bsd-3-clause | 38,622 |
import pytest
import os
import json
import utils
from utils import kfp_client_utils
from utils import minio_utils
from utils import sagemaker_utils
@pytest.mark.parametrize(
"test_file_dir",
[pytest.param("resources/config/kmeans-mnist-hpo", marks=pytest.mark.canary_test)],
)
def test_hyperparameter_tuning(
kfp_client, experiment_id, region, sagemaker_client, test_file_dir
):
download_dir = utils.mkdir(os.path.join(test_file_dir + "/generated"))
test_params = utils.load_params(
utils.replace_placeholders(
os.path.join(test_file_dir, "config.yaml"),
os.path.join(download_dir, "config.yaml"),
)
)
test_params["Arguments"]["channels"] = json.dumps(
test_params["Arguments"]["channels"]
)
test_params["Arguments"]["static_parameters"] = json.dumps(
test_params["Arguments"]["static_parameters"]
)
test_params["Arguments"]["integer_parameters"] = json.dumps(
test_params["Arguments"]["integer_parameters"]
)
test_params["Arguments"]["categorical_parameters"] = json.dumps(
test_params["Arguments"]["categorical_parameters"]
)
_, _, workflow_json = kfp_client_utils.compile_run_monitor_pipeline(
kfp_client,
experiment_id,
test_params["PipelineDefinition"],
test_params["Arguments"],
download_dir,
test_params["TestName"],
test_params["Timeout"],
)
outputs = {
"sagemaker-hyperparameter-tuning": [
"best_hyperparameters",
"best_job_name",
"hpo_job_name",
"model_artifact_url",
"training_image",
]
}
output_files = minio_utils.artifact_download_iterator(
workflow_json, outputs, download_dir
)
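    # output_files, as used below, maps component name -> output name -> local path
    # of the downloaded artifact archive, mirroring the `outputs` mapping above.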
# Verify HPO job was successful on SageMaker
hpo_job_name = utils.read_from_file_in_tar(
output_files["sagemaker-hyperparameter-tuning"]["hpo_job_name"],
"hpo_job_name.txt",
)
print(f"HPO job name: {hpo_job_name}")
hpo_response = sagemaker_utils.describe_hpo_job(sagemaker_client, hpo_job_name)
assert hpo_response["HyperParameterTuningJobStatus"] == "Completed"
# Verify training image output is an ECR image
training_image = utils.read_from_file_in_tar(
output_files["sagemaker-hyperparameter-tuning"]["training_image"],
"training_image.txt",
)
print(f"Training image used: {training_image}")
if "ExpectedTrainingImage" in test_params.keys():
assert test_params["ExpectedTrainingImage"] == training_image
else:
assert f"dkr.ecr.{region}.amazonaws.com" in training_image
# Verify Training job was part of HPO job, returned as best and was successful
best_training_job_name = utils.read_from_file_in_tar(
output_files["sagemaker-hyperparameter-tuning"]["best_job_name"],
"best_job_name.txt",
)
print(f"best training job name: {best_training_job_name}")
train_response = sagemaker_utils.describe_training_job(
sagemaker_client, best_training_job_name
)
assert train_response["TuningJobArn"] == hpo_response["HyperParameterTuningJobArn"]
assert (
train_response["TrainingJobName"]
== hpo_response["BestTrainingJob"]["TrainingJobName"]
)
assert train_response["TrainingJobStatus"] == "Completed"
# Verify model artifacts output was generated from this run
model_artifact_url = utils.read_from_file_in_tar(
output_files["sagemaker-hyperparameter-tuning"]["model_artifact_url"],
"model_artifact_url.txt",
)
print(f"model_artifact_url: {model_artifact_url}")
assert model_artifact_url == train_response["ModelArtifacts"]["S3ModelArtifacts"]
assert best_training_job_name in model_artifact_url
# Verify hyper_parameters output is not empty
hyper_parameters = json.loads(
utils.read_from_file_in_tar(
output_files["sagemaker-hyperparameter-tuning"]["best_hyperparameters"],
"best_hyperparameters.txt",
)
)
print(f"HPO best hyperparameters: {json.dumps(hyper_parameters, indent = 2)}")
assert hyper_parameters is not None
utils.remove_dir(download_dir)
| kubeflow/kfp-tekton-backend | components/aws/sagemaker/tests/integration_tests/component_tests/test_hpo_component.py | Python | apache-2.0 | 4,223 |
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
#
# $Id: test_LD_Y_decr.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the LD_Y_decr opcode.
"""
import base_test
from registers import Reg, SREG
class LD_Y_decr_TestFail(base_test.TestFail): pass
class base_LD_Y_decr(base_test.opcode_test):
"""Generic test case for testing LD_Y_decr opcode.
LD_Y_decr - Load Indirect from data space to Register using index Y and
pre decrement Y.
Operation: Y <- Y - 1 then Rd <- (Y)
opcode is '1001 000d dddd 1010' where 0 <= d <= 31 and d != {28,29}
Only registers PC, R28, R29 and Rd should be changed.
"""
def setup(self):
# Set the register values
self.setup_regs[self.Rd] = 0
self.setup_regs[Reg.R28] = (self.Y & 0xff)
self.setup_regs[Reg.R29] = ((self.Y >> 8) & 0xff)
# set up the val in memory (memory is read after Y is decremented,
# thus we need to write to memory _at_ Y - 1)
self.mem_byte_write( self.Y - 1, self.Vd )
# Return the raw opcode
return 0x900A | (self.Rd << 4)
def analyze_results(self):
self.reg_changed.extend( [self.Rd, Reg.R28, Reg.R29] )
# check that result is correct
expect = self.Vd
got = self.anal_regs[self.Rd]
if expect != got:
self.fail('LD_Y_decr: expect=%02x, got=%02x' % (expect, got))
# check that Y was decremented
expect = self.Y - 1
got = (self.anal_regs[Reg.R28] & 0xff) | ((self.anal_regs[Reg.R29] << 8) & 0xff00)
if expect != got:
self.fail('LD_Y_decr Y not decr: expect=%04x, got=%04x' % (expect, got))
#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """
class LD_Y_decr_r%02d_Y%04x_v%02x_TestFail(LD_Y_decr_TestFail): pass
class test_LD_Y_decr_r%02d_Y%04x_v%02x(base_LD_Y_decr):
Rd = %d
Y = 0x%x
Vd = 0x%x
def fail(self,s):
raise LD_Y_decr_r%02d_Y%04x_v%02x_TestFail, s
"""
#
# automagically generate the test_LD_Y_decr_rNN_vXX class definitions.
#
# Operation is undefined for d = 28 and d = 29.
#
code = ''
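# The template has twelve '%' placeholders (three in each of the three class-name
# mentions, plus Rd, Y and Vd), so each (d, y, v) triple is repeated four times.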
for d in range(0,28)+range(30,32):
for y in (0x10f, 0x1ff):
for v in (0xaa, 0x55):
args = (d,y,v)*4
code += template % args
exec code
| simark/simulavr | regress/test_opcodes/test_LD_Y_decr.py | Python | gpl-2.0 | 3,084 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from .mediaitem import ImageMediaItem
from .imagetab import ImageTab
| marmyshev/item_title | openlp/plugins/images/lib/__init__.py | Python | gpl-2.0 | 2,176 |
import pandas as pd
import logging, os
import ipaddress
#from pprint import pprint
#results = []
#with open('http.log') as inputfile:
# for row in csv.reader(inputfile):
# results.append(row)
#with pandas to pull stats if you want by higest nlargest lowest nsmallest
log = logging.getLogger(__name__)
class bro_ingestion:
"""Parse nmap related data
Attributes:
directory_to_parse: path to nmap data
move_files_after_parsing: set to false if we want to retain files after parsing
delog: to remove files in set directory
inFile: to read data in log"""
def __init__(self, directory_to_parse, move_files_after_parsing = True):
self.directory = directory_to_parse
self.flag = move_files_after_parsing
    def move_data(self, delog):
        # execute an os command to delete the given log files from the directory
        os.system('rm ' + delog)
    def bro_parse(self, inFile):
        # Row 6 of a Bro log is the '#fields' header and row 7 the '#types' line;
        # because the header row starts with the literal '#fields' token, the column
        # labels are shifted one position to the left after dropping the last column.
        df = pd.read_csv(inFile, sep='\t', skiprows=[7], header=6)
        df_modified = df[df.columns[:-1]].copy()
        df_modified.columns = df.columns[1:]
        tx_host = df_modified.groupby('tx_hosts')['ts'].count().sort_values().nlargest(25)
        rx_host = df_modified.groupby('rx_hosts')['ts'].count().sort_values()
        mime_type = df_modified.groupby('mime_type')['ts'].count().sort_values()
        md5 = df_modified.groupby('md5')['ts'].count().sort_values()
        txsumvalues = df_modified.groupby('tx_hosts')['seen_bytes'].sum().sort_values()
        txuniquevalues = df_modified.groupby('tx_hosts')['mime_type'].nunique().sort_values()
        rxsumvalues = df_modified.groupby('rx_hosts')['seen_bytes'].sum().sort_values()
        rxuniquevalues = df_modified.groupby('rx_hosts')['mime_type'].nunique().sort_values()
        txrx_sb = df_modified.groupby(['tx_hosts', 'rx_hosts'])['seen_bytes'].sum().sort_values()
        df_modified['c_public'] = df_modified['tx_hosts'].apply(lambda x: ipaddress.ip_address(x).is_global)
        df_modified['c_private'] = df_modified['tx_hosts'].apply(lambda x: ipaddress.ip_address(x).is_private)
    #def logwrite(bro):
    #    df_modified = df_modified.to_csv('bro.csv', index=None, encoding='utf-8')
        return df_modified
    def ip_address_public(self, inputsr):
        try:
            return ipaddress.ip_address(inputsr).is_global
        except ValueError as err:
            print('Error parsing IP Address:', inputsr, ' with error: ', err)
            return False
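# Illustrative usage sketch (not part of the original module); the log paths are
# placeholders and the parsed DataFrame is simply printed:
#
#     ingestor = bro_ingestion('/path/to/bro/logs', move_files_after_parsing=False)
#     frame = ingestor.bro_parse('/path/to/bro/logs/files.log')
#     print(frame.head())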
| jzadeh/chiron-elk | parsers/broingest.py | Python | apache-2.0 | 2,439 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
try:
from PyQt4.QtGui import QApplication, QKeySequence
except ImportError:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QKeySequence
from pygs import QxtGlobalShortcut
def hotkeyBinding():
SHORTCUT_SHOW = "Ctrl+Alt+S" # Ctrl maps to Command on Mac OS X
SHORTCUT_EXIT = "Ctrl+Alt+F" # again, Ctrl maps to Command on Mac OS X
def show_activated():
print("Shortcut Activated!")
app = QApplication([])
shortcut_show = QxtGlobalShortcut()
shortcut_show.setShortcut(QKeySequence(SHORTCUT_SHOW))
shortcut_show.activated.connect(show_activated)
shortcut_exit = QxtGlobalShortcut()
shortcut_exit.setShortcut(QKeySequence(SHORTCUT_EXIT))
shortcut_exit.activated.connect(app.exit)
return_code = app.exec_()
del shortcut_show
del shortcut_exit
sys.exit(return_code)
| LeunamBk/translatorPy | globalHotkeys.py | Python | gpl-3.0 | 950 |
import json
from urllib import urlencode
from social_auth.backends import BaseOAuth2, OAuthBackend
from social_auth.utils import dsa_urlopen
class AppsfuelBackend(OAuthBackend):
name = 'appsfuel'
def get_user_id(self, details, response):
return response['user_id']
def get_user_details(self, response):
"""Return user details from Appsfuel account"""
fullname = response.get('display_name', '')
email = response.get('email', '')
username = email.split('@')[0] if email else ''
return {
'username': username,
'first_name': fullname,
'email': email
}
class AppsfuelAuth(BaseOAuth2):
"""Appsfuel OAuth mechanism"""
AUTH_BACKEND = AppsfuelBackend
AUTHORIZATION_URL = 'http://app.appsfuel.com/content/permission'
ACCESS_TOKEN_URL = 'https://api.appsfuel.com/v1/live/oauth/token'
USER_URL = 'https://api.appsfuel.com/v1/live/user'
SETTINGS_KEY_NAME = 'APPSFUEL_CLIENT_ID'
SETTINGS_SECRET_NAME = 'APPSFUEL_CLIENT_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
params = {'access_token': access_token}
url = self.USER_URL + '?' + urlencode(params)
return json.load(dsa_urlopen(url))
class AppsfuelSandboxBackend(AppsfuelBackend):
name = 'appsfuel-sandbox'
class AppsfuelSandboxAuth(AppsfuelAuth):
AUTH_BACKEND = AppsfuelSandboxBackend
AUTHORIZATION_URL = 'https://api.appsfuel.com/v1/sandbox/choose'
ACCESS_TOKEN_URL = 'https://api.appsfuel.com/v1/sandbox/oauth/token'
USER_URL = 'https://api.appsfuel.com/v1/sandbox/user'
# Backend definitions
BACKENDS = {
'appsfuel': AppsfuelAuth,
'appsfuel-sandbox': AppsfuelSandboxAuth,
}
| AppsFuel/django-social-auth-appsfuel | django_social_auth_appsfuel/backend.py | Python | bsd-3-clause | 1,775 |
#!/usr/bin/env python
#
# Copyright 2005,2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, filter, blocks
class test_single_pole_iir_filter(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_ff_001(self):
src_data = (0, 1000, 2000, 3000, 4000, 5000)
expected_result = src_data
src = blocks.vector_source_f(src_data)
op = filter.single_pole_iir_filter_ff(1.0)
dst = blocks.vector_sink_f()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data)
def test_ff_002(self):
src_data = (0, 1000, 2000, 3000, 4000, 5000)
expected_result = (0, 125, 359.375, 689.453125, 1103.271484, 1590.36255)
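        # The single-pole IIR recurrence is y[n] = alpha*x[n] + (1 - alpha)*y[n-1];
        # with alpha = 0.125: y[1] = 125, y[2] = 0.125*2000 + 0.875*125 = 359.375, etc.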
src = blocks.vector_source_f(src_data)
op = filter.single_pole_iir_filter_ff(0.125)
dst = blocks.vector_sink_f()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data, 3)
def test_ff_003(self):
block_size = 2
src_data = (0, 1000, 2000, 3000, 4000, 5000)
expected_result = (0, 125, 250, 484.375, 718.75, 1048.828125)
src = blocks.vector_source_f(src_data)
s2p = blocks.stream_to_vector(gr.sizeof_float, block_size)
op = filter.single_pole_iir_filter_ff (0.125, block_size)
p2s = blocks.vector_to_stream(gr.sizeof_float, block_size)
dst = blocks.vector_sink_f()
self.tb.connect(src, s2p, op, p2s, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data, 3)
def test_cc_001(self):
src_data = (0+0j, 1000+1000j, 2000+2000j, 3000+3000j, 4000+4000j, 5000+5000j)
expected_result = src_data
src = blocks.vector_source_c(src_data)
op = filter.single_pole_iir_filter_cc(1.0)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data)
def test_cc_002(self):
src_data = (complex(0,0), complex(1000,-1000), complex(2000,-2000),
complex(3000,-3000), complex(4000,-4000), complex(5000,-5000))
expected_result = (complex(0,0), complex(125,-125), complex(359.375,-359.375),
complex(689.453125,-689.453125), complex(1103.271484,-1103.271484),
complex(1590.36255,-1590.36255))
src = blocks.vector_source_c(src_data)
op = filter.single_pole_iir_filter_cc(0.125)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 3)
def test_cc_003(self):
block_size = 2
src_data = (complex(0,0), complex(1000,-1000), complex(2000,-2000),
complex(3000,-3000), complex(4000,-4000), complex(5000,-5000))
expected_result = (complex(0,0), complex(125,-125), complex(250,-250),
complex(484.375,-484.375), complex(718.75,-718.75),
complex(1048.828125,-1048.828125))
src = blocks.vector_source_c(src_data)
s2p = blocks.stream_to_vector(gr.sizeof_gr_complex, block_size)
op = filter.single_pole_iir_filter_cc(0.125, block_size)
p2s = blocks.vector_to_stream(gr.sizeof_gr_complex, block_size)
dst = blocks.vector_sink_c()
self.tb.connect(src, s2p, op, p2s, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 3)
if __name__ == '__main__':
gr_unittest.run(test_single_pole_iir_filter, "test_single_pole_iir_filter.xml")
| skoslowski/gnuradio | gr-filter/python/filter/qa_single_pole_iir.py | Python | gpl-3.0 | 4,113 |
import wikipedia
from bot import command, msg
class WikiCmd(command.BotCommand):
def run(self, dest, contents):
if len(contents) < 2:
return msg.BotMsg('Usage: .wiki [search terms]')
search_str = ' '.join(contents[1:])
results = wikipedia.search(search_str)
if not results:
return msg.BotMsg('There were no results matching the query.')
try:
summary = wikipedia.summary(results[0], sentences = 2)
except wikipedia.exceptions.DisambiguationError as e:
if not e.options:
return msg.BotMsg('There were no results matching the query.')
try:
summary = wikipedia.summary(e.options[0], sentences = 2)
except:
return msg.BotMsg('Query was too ambiguous')
return msg.BotMsg(summary)
command_instance = WikiCmd(bindings = ['wiki', 'w'], name = 'wiki')
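# Example interaction (illustrative): sending ".wiki monty python" in chat replies
# with a two-sentence summary of the top matching Wikipedia article.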
| federicotdn/python-tgbot | commands/wikipedia_cmd.py | Python | mit | 793 |
import numpy as np
from keras.layers import Activation, Dense
from keras.models import Sequential, model_from_json
from keras.optimizers import Adam
from . import dataset, utils
def train(summarize=False, data_limit=None):
X_train, y_train = dataset.Speech2Phonemes().load_train_data(limit=data_limit)
print("length of training data: ", len(X_train))
# Number of features for each sample in X_train...
# if each 20ms corresponds to 13 MFCC coefficients + delta + delta2, then 39
input_dim = X_train.shape[1]
# Number of distinct classes in the dataset (number of distinct phonemes)
output_dim = np.max(y_train) + 1
# Model takes as input arrays of shape (*, input_dim) and outputs arrays
# of shape (*, hidden_num)
hidden_num = 256
y_train_onehot = utils.onehot_matrix(y_train, output_dim)
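    # onehot_matrix presumably expands each class index into a one-hot row, e.g.
    # labels [0, 2] with output_dim=3 become [[1, 0, 0], [0, 0, 1]], which is the
    # target format categorical_crossentropy expects.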
print("1")
# Architecture of the model
model = Sequential()
print("2")
model.add(Dense(input_dim=input_dim, output_dim=hidden_num))
print("2b")
model.add(Activation('sigmoid'))
print("2c")
#model.add(Dropout(0.25))
print("2d")
model.add(Dense(output_dim=output_dim))
model.add(Activation('softmax'))
print("3")
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001))
stats = model.fit(X_train, y_train_onehot,
shuffle=True,
batch_size=256,
nb_epoch=200,
verbose=1
)
print("4")
save_model(model)
print("5")
if summarize:
print(model.summary())
import matplotlib.pyplot as plt
plt.plot(stats.history['loss'])
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.title('Loss function for %d samples' % X_train.shape[0])
plt.show()
def test(data_limit=None):
model = load_model()
X_test, y_test = dataset.Speech2Phonemes().load_test_data()
out = model.predict_classes(X_test,
batch_size=256,
verbose=0
)
acc = sum(out == y_test) * 1.0 / len(out)
print('Accuracy using %d testing samples: %f' % (X_test.shape[0], acc))
def predict(X_test):
model = load_model()
return model.predict_classes(X_test,
batch_size=256,
verbose=0
)
def save_model(model):
reader = dataset.Speech2Phonemes()
with open(reader.params('speech2phonemes_arch', 'json'), 'w') as archf:
archf.write(model.to_json())
model.save_weights(
filepath=reader.params('speech2phonemes_weights', 'h5'),
overwrite=True
)
def load_model():
reader = dataset.Speech2Phonemes()
with open(reader.params('speech2phonemes_arch', 'json')) as arch:
model = model_from_json(arch.read())
model.load_weights(reader.params('speech2phonemes_weights', 'h5'))
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001))
return model
if __name__ == "__main__":
train() | matthijsvk/multimodalSR | code/audioSR/Experiments/spoken-command-processor/model/speech2phonemes.py | Python | mit | 2,910 |
from p2pool.bitcoin import networks
from p2pool.util import math
# CHAIN_LENGTH = number of shares back client keeps
# REAL_CHAIN_LENGTH = maximum number of shares back client uses to compute payout
# REAL_CHAIN_LENGTH must always be <= CHAIN_LENGTH
# REAL_CHAIN_LENGTH must be changed in sync with all other clients
# changes can be done by changing one, then the other
nets = dict(
copperbars=math.Object(
PARENT=networks.nets['copperbars'],
SHARE_PERIOD=5, # seconds
CHAIN_LENGTH=2*60*60//5, # shares
REAL_CHAIN_LENGTH=2*60*60//5, # shares
TARGET_LOOKBEHIND=150, # shares
SPREAD=10, # blocks
IDENTIFIER='d138e5b9e7923515'.decode('hex'),
PREFIX='e206c3a24ee749b5'.decode('hex'),
P2P_PORT=7870,
MIN_TARGET=0,
MAX_TARGET=2**256//2**22 - 1,
PERSIST=False,
WORKER_PORT=9777,
BOOTSTRAP_ADDRS=' '.split(' '),
ANNOUNCE_CHANNEL='#asdasdasdp2pool-alt',
VERSION_CHECK=lambda v: v >= 60004,
),
copperbars_testnet=math.Object(
PARENT=networks.nets['copperbars_testnet'],
SHARE_PERIOD=3, # seconds
CHAIN_LENGTH=20*60//3, # shares
REAL_CHAIN_LENGTH=20*60//3, # shares
TARGET_LOOKBEHIND=200, # shares
SPREAD=3, # blocks
IDENTIFIER='f037d5b8c7923510'.decode('hex'),
PREFIX='8208c1a54ef649b0'.decode('hex'),
P2P_PORT=17875,
MIN_TARGET=0,
MAX_TARGET=2**256//2**14 - 1,
PERSIST=False,
WORKER_PORT=18336,
BOOTSTRAP_ADDRS=' '.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: v >= 60004,
),
)
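# Stamp every network object with its dictionary key so later code can refer to
# it as net.NAME.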
for net_name, net in nets.iteritems():
net.NAME = net_name
| urpoapina/cpr-p2pool | p2pool/networks.py | Python | gpl-3.0 | 1,730 |
#!/usr/bin/env python
import pytest
import numpy as np
import sklearn.datasets as datasets
import sklearn.manifold as manifold
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestManifold(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.manifold.LocallyLinearEmbedding,
manifold.LocallyLinearEmbedding)
self.assertIs(df.manifold.Isomap, manifold.Isomap)
self.assertIs(df.manifold.MDS, manifold.MDS)
self.assertIs(df.manifold.SpectralEmbedding, manifold.SpectralEmbedding)
self.assertIs(df.manifold.TSNE, manifold.TSNE)
def test_locally_linear_embedding(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.manifold.locally_linear_embedding(3, 3)
expected = manifold.locally_linear_embedding(iris.data, 3, 3)
self.assertEqual(len(result), 2)
self.assertIsInstance(result[0], pdml.ModelFrame)
tm.assert_index_equal(result[0].index, df.index)
tm.assert_numpy_array_equal(result[0].values, expected[0])
self.assertEqual(result[1], expected[1])
def test_spectral_embedding(self):
N = 10
m = np.random.random_integers(50, 200, size=(N, N))
m = (m + m.T) / 2
df = pdml.ModelFrame(m)
self.assert_numpy_array_almost_equal(df.data.values, m)
result = df.manifold.spectral_embedding(random_state=self.random_state)
expected = manifold.spectral_embedding(m, random_state=self.random_state)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.index)
# signs can be inversed
self.assert_numpy_array_almost_equal(np.abs(result.data.values),
np.abs(expected))
@pytest.mark.parametrize("algo", ['Isomap'])
def test_Isomap(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.manifold, algo)()
mod2 = getattr(manifold, algo)()
df.fit(mod1)
mod2.fit(iris.data)
result = df.transform(mod1)
expected = mod2.transform(iris.data)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.index)
self.assert_numpy_array_almost_equal(result.data.values, expected)
@pytest.mark.parametrize("algo", ['MDS'])
def test_MDS(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.manifold, algo)(random_state=self.random_state)
mod2 = getattr(manifold, algo)(random_state=self.random_state)
result = df.fit_transform(mod1)
expected = mod2.fit_transform(iris.data)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.index)
self.assert_numpy_array_almost_equal(result.data.values, expected)
@pytest.mark.parametrize("algo", ['TSNE'])
def test_TSNE(self, algo):
digits = datasets.load_digits()
df = pdml.ModelFrame(digits)
mod1 = getattr(df.manifold, algo)(n_components=2, random_state=self.random_state)
mod2 = getattr(manifold, algo)(n_components=2, random_state=self.random_state)
# np.random.seed(1)
result = df.fit_transform(mod1)
# np.random.seed(1)
expected = mod2.fit_transform(digits.data)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.index)
self.assert_numpy_array_almost_equal(result.data.shape, expected.shape)
| sinhrks/expandas | pandas_ml/skaccessors/test/test_manifold.py | Python | bsd-3-clause | 3,754 |
"""
This contains the base class for the geometry engine, which proposes new positions
for each additional atom that must be added.
"""
from simtk import unit
import numpy as np
import networkx as nx
from perses.storage import NetCDFStorageView
################################################################################
# Initialize logging
################################################################################
import logging
logging.basicConfig(level=logging.NOTSET)
_logger = logging.getLogger("geometry")
_logger.setLevel(logging.INFO)
################################################################################
# Constants
################################################################################
LOG_ZERO = -1.0e+6
ENERGY_MISMATCH_RATIO_THRESHOLD = 1e-3
ENERGY_THRESHOLD = 1e-6
################################################################################
# Utility methods
################################################################################
def check_dimensionality(quantity, compatible_units):
"""
Ensure that the specified quantity has units compatible with specified unit.
Parameters
----------
quantity : simtk.unit.Quantity or float
The quantity to be checked
    compatible_units : simtk.unit.Quantity, simtk.unit.Unit, or float
        The units that ``quantity`` must be compatible with; if ``float`` is specified, ``quantity`` must be a float or numpy array
Raises
------
ValueError if the specified quantity does not have the appropriate dimensionality or type
Returns
-------
is_compatible : bool
Returns True if dimensionality is as requested
"""
if unit.is_quantity(compatible_units) or unit.is_unit(compatible_units):
try:
from simtk.unit.quantity import is_dimensionless
except ModuleNotFoundError:
from openmm.unit.quantity import is_dimensionless
if not is_dimensionless(quantity / compatible_units):
raise ValueError('{} does not have units compatible with expected {}'.format(quantity, compatible_units))
elif compatible_units == float:
if not (isinstance(quantity, float) or isinstance(quantity, np.ndarray)):
raise ValueError("'{}' expected to be a float, but was instead {}".format(quantity, type(quantity)))
else:
raise ValueError("Don't know how to handle compatible_units of {}".format(compatible_units))
# Units are compatible if they pass this point
return True
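# The following is an illustrative, hedged usage sketch of check_dimensionality (not part of the
# original perses API); the values below are arbitrary and chosen only to demonstrate the three
# branches: compatible units, the `float` case, and the incompatible case that raises ValueError.
def _example_check_dimensionality():
    """Minimal sketch showing the expected behavior of check_dimensionality."""
    assert check_dimensionality(1.0 * unit.nanometers, unit.angstroms)  # compatible length units
    assert check_dimensionality(0.5, float)                             # bare floats (or ndarrays) pass the `float` check
    try:
        check_dimensionality(1.0 * unit.nanometers, unit.kilojoules_per_mole)
    except ValueError:
        pass  # incompatible dimensionality raises ValueError, as documented above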
class GeometryEngine(object):
"""
This is the base class for the geometry engine.
Parameters
----------
metadata : dict
GeometryEngine-related metadata as a dict
"""
def __init__(self, metadata=None, storage=None):
# TODO: Either this base constructor should be called by subclasses, or we should remove its arguments.
pass
def propose(self, top_proposal, current_positions, beta):
"""
Make a geometry proposal for the appropriate atoms.
Parameters
----------
top_proposal : TopologyProposal object
Object containing the relevant results of a topology proposal
beta : float
The inverse temperature
Returns
-------
new_positions : [n, 3] ndarray
The new positions of the system
"""
return np.array([0.0,0.0,0.0])
def logp_reverse(self, top_proposal, new_coordinates, old_coordinates, beta):
"""
Calculate the logp for the given geometry proposal
Parameters
----------
top_proposal : TopologyProposal object
Object containing the relevant results of a topology proposal
new_coordinates : [n, 3] np.ndarray
The coordinates of the system after the proposal
        old_coordinates : [n, 3] np.ndarray
            The coordinates of the system before the proposal
beta : float
The inverse temperature
Returns
-------
logp : float
The log probability of the proposal for the given transformation
"""
return 0.0
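# Hedged sketch of the expected call pattern for a concrete GeometryEngine subclass such as
# FFAllAngleGeometryEngine (defined below). The `engine`, `top_proposal`, `old_positions`, and
# `beta` arguments are assumed to be supplied by the caller (e.g. perses' topology proposal
# machinery); this is not part of the production RJMC code path.
def _example_geometry_engine_round_trip(engine, top_proposal, old_positions, beta):
    """Propose new positions in the forward direction and score the reverse direction."""
    # Forward: place the unique new atoms and obtain the forward proposal log probability.
    new_positions, logp_forward = engine.propose(top_proposal, old_positions, beta)
    # Reverse: compute the log probability of proposing the old coordinates from the new ones.
    logp_reverse = engine.logp_reverse(top_proposal, new_positions, old_positions, beta)
    # The difference between these log probabilities enters the RJMC acceptance criterion
    # together with the change in reduced potential energy.
    return logp_forward, logp_reverse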
class FFAllAngleGeometryEngine(GeometryEngine):
"""
This is an implementation of GeometryEngine which uses all valence terms and OpenMM
Parameters
----------
use_sterics : bool, optional, default=False
If True, sterics will be used in proposals to minimize clashes.
This may significantly slow down the simulation, however.
n_bond_divisions : int, default 1000
number of bond divisions in choosing the r for added/deleted atoms
n_angle_divisions : int, default 180
number of bond angle divisions in choosing theta for added/deleted atoms
n_torsion_divisions : int, default 360
        number of torsion angle divisions in choosing phi for added/deleted atoms
verbose: bool, default True
whether to be verbose in output
    storage : NetCDFStorage, optional, default None
        if specified, proposal data will be written via a NetCDFStorageView
bond_softening_constant : float (between 0, 1), default 1.0
how much to soften bonds
angle_softening_constant : float (between 0, 1), default 1.0
how much to soften angles
    neglect_angles : bool, optional, default False
whether to ignore and report on theta angle potentials that add variance to the work
use_14_nonbondeds : bool, default True
whether to consider 1,4 exception interactions in the geometry proposal
        NOTE: if this is set to True, then in the HybridTopologyFactory, the argument 'interpolate_old_and_new_14s' must be set to False; and vice versa
TODO : remove Context objects and checks since they are clunky and no longer used for troubleshooting
"""
def __init__(self,
metadata=None,
use_sterics=False,
n_bond_divisions=1000,
n_angle_divisions=180,
n_torsion_divisions=360,
verbose=True,
storage=None,
bond_softening_constant=1.0,
angle_softening_constant=1.0,
neglect_angles = False,
use_14_nonbondeds = True):
self._metadata = metadata
self.write_proposal_pdb = False # if True, will write PDB for sequential atom placements
self.pdb_filename_prefix = 'geometry-proposal' # PDB file prefix for writing sequential atom placements
self.nproposed = 0 # number of times self.propose() has been called
self.verbose = verbose
self.use_sterics = use_sterics
self._use_14_nonbondeds = use_14_nonbondeds
# if self.use_sterics: #not currently supported
# raise Exception("steric contributions are not currently supported.")
self._n_bond_divisions = n_bond_divisions
self._n_angle_divisions = n_angle_divisions
self._n_torsion_divisions = n_torsion_divisions
self._bond_softening_constant = bond_softening_constant
self._angle_softening_constant = angle_softening_constant
if storage:
self._storage = NetCDFStorageView(modname="GeometryEngine", storage=storage)
else:
self._storage = None
self.neglect_angles = neglect_angles
def propose(self, top_proposal, current_positions, beta, validate_energy_bookkeeping = True):
"""
Make a geometry proposal for the appropriate atoms.
Parameters
----------
top_proposal : TopologyProposal object
Object containing the relevant results of a topology proposal
current_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The current positions
beta : simtk.unit.Quantity with units compatible with 1/(kilojoules_per_mole)
The inverse thermal energy
validate_energy_bookkeeping : bool
whether to validate the energy mismatch ratio; this is no longer strictly necessary, and will certainly fail if ring closure or non-conservative perturbations are conducted
(non-conservative transformations are defined as transformations wherein not _all_ of the valence energies are used to make topology proposals...)
Returns
-------
new_positions : [n, 3] ndarray
The new positions of the system
logp_proposal : float
The log probability of the forward-only proposal
"""
_logger.info("propose: performing forward proposal")
# Ensure positions have units compatible with nanometers
check_dimensionality(current_positions, unit.nanometers)
check_dimensionality(beta, unit.kilojoules_per_mole**(-1))
# TODO: Change this to use md_unit_system instead of hard-coding nanometers
if not top_proposal.unique_new_atoms:
_logger.info("propose: there are no unique new atoms; logp_proposal = 0.0.")
# If there are no unique new atoms, return new positions in correct order for new topology object and log probability of zero
# TODO: Carefully check this
import parmed
structure = parmed.openmm.load_topology(top_proposal.old_topology, top_proposal._old_system)
atoms_with_positions = [ structure.atoms[atom_idx] for atom_idx in top_proposal.new_to_old_atom_map.keys() ]
new_positions = self._copy_positions(atoms_with_positions, top_proposal, current_positions)
logp_proposal, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms = 0.0, None, None, None, None
self.forward_final_growth_system = None
else:
_logger.info("propose: unique new atoms detected; proceeding to _logp_propose...")
logp_proposal, new_positions, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms, omitted_terms = self._logp_propose(top_proposal, current_positions, beta, direction='forward', validate_energy_bookkeeping = validate_energy_bookkeeping)
self.nproposed += 1
check_dimensionality(new_positions, unit.nanometers)
check_dimensionality(logp_proposal, float)
#define forward attributes
self.forward_rjmc_info = rjmc_info
self.forward_atoms_with_positions_reduced_potential, self.forward_final_context_reduced_potential = atoms_with_positions_reduced_potential, final_context_reduced_potential
self.forward_neglected_angle_terms = neglected_angle_terms
return new_positions, logp_proposal
def logp_reverse(self, top_proposal, new_coordinates, old_coordinates, beta, validate_energy_bookkeeping = True):
"""
Calculate the logp for the given geometry proposal
Parameters
----------
top_proposal : TopologyProposal object
Object containing the relevant results of a topology proposal
new_coordinates : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The coordinates of the system after the proposal
old_coordiantes : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The coordinates of the system before the proposal
beta : simtk.unit.Quantity with units compatible with 1/(kilojoules_per_mole)
The inverse thermal energy
validate_energy_bookkeeping : bool
whether to validate the energy mismatch ratio; this is no longer strictly necessary, and will certainly fail if ring closure or non-conservative perturbations are conducted
(non-conservative transformations are defined as transformations wherein not _all_ of the valence energies are used to make topology proposals...)
Returns
-------
logp : float
The log probability of the proposal for the given transformation
"""
_logger.info("logp_reverse: performing reverse proposal")
check_dimensionality(new_coordinates, unit.nanometers)
check_dimensionality(old_coordinates, unit.nanometers)
check_dimensionality(beta, unit.kilojoules_per_mole**(-1))
# If there are no unique old atoms, the log probability is zero.
if not top_proposal.unique_old_atoms:
_logger.info("logp_reverse: there are no unique old atoms; logp_proposal = 0.0.")
#define reverse attributes
self.reverse_new_positions, self.reverse_rjmc_info, self.reverse_atoms_with_positions_reduced_potential, self.reverse_final_context_reduced_potential, self.reverse_neglected_angle_terms = None, None, None, None, None
self.reverse_final_growth_system = None
return 0.0
# Compute log proposal probability for reverse direction
_logger.info("logp_reverse: unique new atoms detected; proceeding to _logp_propose...")
logp_proposal, new_positions, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms, omitted_terms = self._logp_propose(top_proposal, old_coordinates, beta, new_positions=new_coordinates, direction='reverse', validate_energy_bookkeeping = validate_energy_bookkeeping)
self.reverse_new_positions, self.reverse_rjmc_info = new_positions, rjmc_info
self.reverse_atoms_with_positions_reduced_potential, self.reverse_final_context_reduced_potential = atoms_with_positions_reduced_potential, final_context_reduced_potential
self.reverse_neglected_angle_terms = neglected_angle_terms
check_dimensionality(logp_proposal, float)
return logp_proposal
def _write_partial_pdb(self, pdbfile, topology, positions, atoms_with_positions, model_index):
"""
Write the subset of the molecule for which positions are defined.
Parameters
----------
pdbfile : file-like object
The open file-like object for the PDB file being written
topology : simtk.openmm.Topology
The OpenMM Topology object
positions : simtk.unit.Quantity of shape (n_atoms, 3) with units compatible with nanometers
The positions
atoms_with_positions : list of parmed.Atom
parmed Atom objects for which positions have been defined
model_index : int
The MODEL index for the PDB file to use
"""
check_dimensionality(positions, unit.nanometers)
from simtk.openmm.app import Modeller
modeller = Modeller(topology, positions)
atom_indices_with_positions = [ atom.idx for atom in atoms_with_positions ]
atoms_to_delete = [ atom for atom in modeller.topology.atoms() if (atom.index not in atom_indices_with_positions) ]
modeller.delete(atoms_to_delete)
pdbfile.write('MODEL %5d\n' % model_index)
from simtk.openmm.app import PDBFile
PDBFile.writeFile(modeller.topology, modeller.positions, file=pdbfile)
pdbfile.flush()
pdbfile.write('ENDMDL\n')
def _logp_propose(self, top_proposal, old_positions, beta, new_positions=None, direction='forward', validate_energy_bookkeeping = True):
"""
This is an INTERNAL function that handles both the proposal and the logp calculation,
to reduce code duplication. Whether it proposes or just calculates a logp is based on
the direction option. Note that with respect to "new" and "old" terms, "new" will always
mean the direction we are proposing (even in the reverse case), so that for a reverse proposal,
this function will still take the new coordinates as new_coordinates
Parameters
----------
top_proposal : topology_proposal.TopologyProposal object
topology proposal containing the relevant information
old_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The coordinates of the system before the proposal
beta : simtk.unit.Quantity with units compatible with 1/(kilojoules_per_mole)
The inverse thermal energy
new_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers, optional, default=None
The coordinates of the system after the proposal, or None for forward proposals
direction : str
Whether to make a proposal ('forward') or just calculate logp ('reverse')
validate_energy_bookkeeping : bool
whether to validate the energy mismatch ratio; this is no longer strictly necessary, and will certainly fail if ring closure or non-conservative perturbations are conducted
(non-conservative transformations are defined as transformations wherein not _all_ of the valence energies are used to make topology proposals...)
Returns
-------
logp_proposal : float
the logp of the proposal
new_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
The new positions (same as input if direction='reverse')
rjmc_info: list
            list of per-atom proposal dicts with keys 'atom_index', 'u_r', 'u_theta', 'r', 'theta', 'phi', 'logp_r', 'logp_theta', 'logp_phi', 'log_detJ', 'added_energy', 'proposal_prob'
atoms_with_positions_reduced_potential : float
energy of core atom configuration (i.e. before any proposal is made).
final_context_reduced_potential : float
            energy of the final system (corrected for valence-only terms and whether angles are neglected). In the reverse regime, this is the old system.
neglected_angle_terms : list of ints
list of indices corresponding to the angle terms in the corresponding system that are neglected (i.e. which are to be
placed into the lambda perturbation scheme)
omitted_growth_terms : dict
dictionary of terms that have been omitted in the proposal
the dictionary carries indices corresponding to the new or old topology, depending on whether the proposal is forward, or reverse (respectively)
"""
_logger.info("Conducting forward proposal...")
import copy
from perses.tests.utils import compute_potential_components
# Ensure all parameters have the expected units
check_dimensionality(old_positions, unit.angstroms)
if new_positions is not None:
check_dimensionality(new_positions, unit.angstroms)
# Determine order in which atoms (and the torsions they are involved in) will be proposed
_logger.info("Computing proposal order with NetworkX...")
proposal_order_tool = NetworkXProposalOrder(top_proposal, direction=direction)
torsion_proposal_order, logp_choice, omitted_bonds = proposal_order_tool.determine_proposal_order()
atom_proposal_order = [ torsion[0] for torsion in torsion_proposal_order ]
# some logs for clarity
_logger.info(f"number of atoms to be placed: {len(atom_proposal_order)}")
_logger.info(f"Atom index proposal order is {atom_proposal_order}")
_logger.info(f"omitted_bonds: {omitted_bonds}")
growth_parameter_name = 'growth_stage'
if direction=="forward":
_logger.info("direction of proposal is forward; creating atoms_with_positions and new positions from old system/topology...")
# Find and copy known positions to match new topology
import parmed
structure = parmed.openmm.load_topology(top_proposal.new_topology, top_proposal.new_system)
atoms_with_positions = [structure.atoms[atom_idx] for atom_idx in top_proposal.new_to_old_atom_map.keys()]
new_positions = self._copy_positions(atoms_with_positions, top_proposal, old_positions)
self._new_posits = copy.deepcopy(new_positions)
# Create modified System object
_logger.info("creating growth system...")
growth_system_generator = GeometrySystemGenerator(top_proposal.new_system,
torsion_proposal_order,
omitted_bonds = omitted_bonds,
reference_topology = top_proposal._new_topology,
global_parameter_name=growth_parameter_name,
use_sterics=self.use_sterics,
neglect_angles = self.neglect_angles,
use_14_nonbondeds = self._use_14_nonbondeds)
growth_system = growth_system_generator.get_modified_system()
elif direction=='reverse':
_logger.info("direction of proposal is reverse; creating atoms_with_positions from old system/topology")
if new_positions is None:
raise ValueError("For reverse proposals, new_positions must not be none.")
# Find and copy known positions to match old topology
import parmed
structure = parmed.openmm.load_topology(top_proposal.old_topology, top_proposal.old_system)
atoms_with_positions = [structure.atoms[atom_idx] for atom_idx in top_proposal.old_to_new_atom_map.keys()]
# Create modified System object
_logger.info("creating growth system...")
growth_system_generator = GeometrySystemGenerator(top_proposal.old_system,
torsion_proposal_order,
omitted_bonds = omitted_bonds,
reference_topology = top_proposal._old_topology,
global_parameter_name=growth_parameter_name,
use_sterics=self.use_sterics,
neglect_angles = self.neglect_angles,
use_14_nonbondeds = self._use_14_nonbondeds)
growth_system = growth_system_generator.get_modified_system()
else:
raise ValueError("Parameter 'direction' must be forward or reverse")
# Define a system for the core atoms before new atoms are placed
self.atoms_with_positions_system = growth_system_generator._atoms_with_positions_system
self.growth_system = growth_system
# Get the angle terms that are neglected from the growth system
neglected_angle_terms = growth_system_generator.neglected_angle_terms
_logger.info(f"neglected angle terms include {neglected_angle_terms}")
# Rename the logp_choice from the NetworkXProposalOrder for the purpose of adding logPs in the growth stage
logp_proposal = np.sum(np.array(logp_choice))
_logger.info(f"log probability choice of torsions and atom order: {logp_proposal}")
if self._storage:
self._storage.write_object("{}_proposal_order".format(direction), proposal_order_tool, iteration=self.nproposed)
platform_name = 'CUDA'
# Create an OpenMM context
from simtk import openmm
from perses.dispersed.utils import configure_platform
_logger.info("creating platform, integrators, and contexts; setting growth parameter")
platform = configure_platform(platform_name, fallback_platform_name='Reference', precision='double')
integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
atoms_with_positions_system_integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
final_system_integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
context = openmm.Context(growth_system, integrator, platform)
growth_system_generator.set_growth_parameter_index(len(atom_proposal_order)+1, context)
#create final growth contexts for nonalchemical perturbations...
if direction == 'forward':
self.forward_final_growth_system = copy.deepcopy(context.getSystem())
elif direction == 'reverse':
self.reverse_final_growth_system = copy.deepcopy(context.getSystem())
growth_parameter_value = 1 # Initialize the growth_parameter value before the atom placement loop
        # In the forward direction, the atoms_with_positions context is given the new positions of the mapped (core) atoms
        # In the reverse direction, the atoms_with_positions context is given the old positions of the mapped (core) atoms
atoms_with_positions_context = openmm.Context(self.atoms_with_positions_system, atoms_with_positions_system_integrator, platform)
if direction == 'forward':
_logger.info("setting atoms_with_positions context new positions")
atoms_with_positions_context.setPositions(new_positions)
else:
_logger.info("setting atoms_with_positions context old positions")
atoms_with_positions_context.setPositions(old_positions)
#Print the energy of the system before unique_new/old atoms are placed...
state = atoms_with_positions_context.getState(getEnergy=True)
atoms_with_positions_reduced_potential = beta*state.getPotentialEnergy()
atoms_with_positions_reduced_potential_components = [(force, energy) for force, energy in compute_potential_components(atoms_with_positions_context)]
_logger.debug(f'atoms_with_positions_reduced_potential_components:')
for f, e in atoms_with_positions_reduced_potential_components:
_logger.debug(f'\t{f} : {e}')
atoms_with_positions_methods_differences = abs(atoms_with_positions_reduced_potential - sum([i[1] for i in atoms_with_positions_reduced_potential_components]))
        _logger.debug(f'Difference between the reduced potential and the sum of its components: {atoms_with_positions_methods_differences}')
assert atoms_with_positions_methods_differences < ENERGY_THRESHOLD, f"the difference between the atoms_with_positions_reduced_potential and the sum of atoms_with_positions_reduced_potential_components is {atoms_with_positions_methods_differences}"
# Place each atom in predetermined order
_logger.info("There are {} new atoms".format(len(atom_proposal_order)))
rjmc_info = list()
energy_logger = [] #for bookkeeping per_atom energy reduced potentials
for torsion_atom_indices, proposal_prob in zip(torsion_proposal_order, logp_choice):
_logger.debug(f"Proposing torsion {torsion_atom_indices} with proposal probability {proposal_prob}")
# Get parmed Structure Atom objects associated with torsion
atom, bond_atom, angle_atom, torsion_atom = [ structure.atoms[index] for index in torsion_atom_indices ]
# Activate the new atom interactions
growth_system_generator.set_growth_parameter_index(growth_parameter_value, context=context)
# Get internal coordinates if direction is reverse
if direction == 'reverse':
atom_coords, bond_coords, angle_coords, torsion_coords = [ old_positions[index] for index in torsion_atom_indices ]
internal_coordinates, detJ = self._cartesian_to_internal(atom_coords, bond_coords, angle_coords, torsion_coords)
# Extract dimensionless internal coordinates
r, theta, phi = internal_coordinates[0], internal_coordinates[1], internal_coordinates[2] # dimensionless
_logger.debug(f"\treverse proposal: r = {r}; theta = {theta}; phi = {phi}")
bond = self._get_relevant_bond(atom, bond_atom)
if bond is not None:
if direction == 'forward':
r = self._propose_bond(bond, beta, self._n_bond_divisions)
_logger.debug(f"\tproposing forward bond of {r}.")
logp_r = self._bond_logp(r, bond, beta, self._n_bond_divisions)
_logger.debug(f"\tlogp_r = {logp_r}.")
# Retrieve relevant quantities for valence bond and compute u_r
r0, k = bond.type.req, bond.type.k * self._bond_softening_constant
sigma_r = unit.sqrt((1.0/(beta*k)))
r0, k, sigma_r = r0.value_in_unit_system(unit.md_unit_system), k.value_in_unit_system(unit.md_unit_system), sigma_r.value_in_unit_system(unit.md_unit_system)
u_r = 0.5*((r - r0)/sigma_r)**2
_logger.debug(f"\treduced r potential = {u_r}.")
else:
if direction == 'forward':
constraint = self._get_bond_constraint(atom, bond_atom, top_proposal.new_system)
if constraint is None:
raise ValueError("Structure contains a topological bond [%s - %s] with no constraint or bond information." % (str(atom), str(bond_atom)))
r = constraint.value_in_unit_system(unit.md_unit_system) #set bond length to exactly constraint
_logger.debug(f"\tproposing forward constrained bond of {r} with log probability of 0.0 and implied u_r of 0.0.")
logp_r = 0.0
u_r = 0.0
# Propose an angle and calculate its log probability
angle = self._get_relevant_angle(atom, bond_atom, angle_atom)
if direction=='forward':
theta = self._propose_angle(angle, beta, self._n_angle_divisions)
_logger.debug(f"\tproposing forward angle of {theta}.")
logp_theta = self._angle_logp(theta, angle, beta, self._n_angle_divisions)
_logger.debug(f"\t logp_theta = {logp_theta}.")
# Retrieve relevant quantities for valence angle and compute u_theta
theta0, k = angle.type.theteq, angle.type.k * self._angle_softening_constant
sigma_theta = unit.sqrt(1.0/(beta * k))
theta0, k, sigma_theta = theta0.value_in_unit_system(unit.md_unit_system), k.value_in_unit_system(unit.md_unit_system), sigma_theta.value_in_unit_system(unit.md_unit_system)
u_theta = 0.5*((theta - theta0)/sigma_theta)**2
_logger.info(f"\treduced angle potential = {u_theta}.")
            # Propose a torsion angle and calculate its log probability
if direction=='forward':
# Note that (r, theta) are dimensionless here
phi, logp_phi = self._propose_torsion(context, torsion_atom_indices, new_positions, r, theta, beta, self._n_torsion_divisions)
xyz, detJ = self._internal_to_cartesian(new_positions[bond_atom.idx], new_positions[angle_atom.idx], new_positions[torsion_atom.idx], r, theta, phi)
new_positions[atom.idx] = xyz
_logger.debug(f"\tproposing forward torsion of {phi}.")
_logger.debug(f"\tsetting new_positions[{atom.idx}] to {xyz}. ")
else:
old_positions_for_torsion = copy.deepcopy(old_positions)
# Note that (r, theta, phi) are dimensionless here
logp_phi = self._torsion_logp(context, torsion_atom_indices, old_positions_for_torsion, r, theta, phi, beta, self._n_torsion_divisions)
_logger.debug(f"\tlogp_phi = {logp_phi}")
# Compute potential energy
if direction == 'forward':
context.setPositions(new_positions)
else:
context.setPositions(old_positions)
state = context.getState(getEnergy=True)
reduced_potential_energy = beta*state.getPotentialEnergy()
_logger.debug(f"\taccumulated growth context reduced energy = {reduced_potential_energy}")
#Compute change in energy from previous reduced potential
if growth_parameter_value == 1: # then there is no previous reduced potential so u_phi is simply reduced_potential_energy - u_r - u_theta
added_energy = reduced_potential_energy
else:
previous_reduced_potential_energy = energy_logger[-1]
added_energy = reduced_potential_energy - previous_reduced_potential_energy
_logger.debug(f"growth index {growth_parameter_value} added reduced energy = {added_energy}.")
atom_placement_dict = {'atom_index': atom.idx,
'u_r': u_r,
'u_theta' : u_theta,
'r': r,
'theta': theta,
'phi': phi,
'logp_r': logp_r,
'logp_theta': logp_theta,
'logp_phi': logp_phi,
'log_detJ': np.log(detJ),
'added_energy': added_energy,
'proposal_prob': proposal_prob}
rjmc_info.append(atom_placement_dict)
logp_proposal += logp_r + logp_theta + logp_phi - np.log(detJ) # TODO: Check sign of detJ
growth_parameter_value += 1
energy_logger.append(reduced_potential_energy)
# DEBUG: Write PDB file for placed atoms
atoms_with_positions.append(atom)
_logger.debug(f"\tatom placed, rjmc_info list updated, and growth_parameter_value incremented.")
# assert that the energy of the new positions is ~= atoms_with_positions_reduced_potential + reduced_potential_energy
# The final context is treated in the same way as the atoms_with_positions_context
if direction == 'forward': #if the direction is forward, the final system for comparison is top_proposal's new system
_system, _positions = top_proposal._new_system, new_positions
else:
_system, _positions = top_proposal._old_system, old_positions
if not self.use_sterics:
final_system = self._define_no_nb_system(_system, neglected_angle_terms, atom_proposal_order)
_logger.info(f"{direction} final system defined with {len(neglected_angle_terms)} neglected angles.")
else:
final_system = copy.deepcopy(_system)
force_names = {force.__class__.__name__ : index for index, force in enumerate(final_system.getForces())}
if 'NonbondedForce' in force_names.keys():
final_system.getForce(force_names['NonbondedForce']).setUseDispersionCorrection(False)
_logger.info(f"{direction} final system defined with nonbonded interactions.")
final_context = openmm.Context(final_system, final_system_integrator, platform)
final_context.setPositions(_positions)
state = final_context.getState(getEnergy=True)
final_context_reduced_potential = beta*state.getPotentialEnergy()
final_context_components = [(force, energy*beta) for force, energy in compute_potential_components(final_context)]
atoms_with_positions_reduced_potential_components = [(force, energy*beta) for force, energy in compute_potential_components(atoms_with_positions_context)]
_logger.debug(f"reduced potential components before atom placement:")
for item in atoms_with_positions_reduced_potential_components:
_logger.debug(f"\t\t{item[0]}: {item[1]}")
_logger.info(f"total reduced potential before atom placement: {atoms_with_positions_reduced_potential}")
_logger.debug(f"potential components added from growth system:")
added_energy_components = [(force, energy*beta) for force, energy in compute_potential_components(context)]
for item in added_energy_components:
_logger.debug(f"\t\t{item[0]}: {item[1]}")
#now for the corrected reduced_potential_energy
if direction == 'forward':
positions = new_positions
else:
positions = old_positions
reduced_potential_energy = self._corrected_reduced_potential(growth_system_generator, positions, platform_name, atom_proposal_order, beta)
_logger.info(f"total reduced energy added from growth system: {reduced_potential_energy}")
_logger.debug(f"reduced potential of final system:")
for item in final_context_components:
_logger.debug(f"\t\t{item[0]}: {item[1]}")
_logger.info(f"final reduced energy {final_context_reduced_potential}")
_logger.info(f"sum of energies: {atoms_with_positions_reduced_potential + reduced_potential_energy}")
_logger.info(f"magnitude of difference in the energies: {abs(final_context_reduced_potential - atoms_with_positions_reduced_potential - reduced_potential_energy)}")
energy_mismatch_ratio = (atoms_with_positions_reduced_potential + reduced_potential_energy) / (final_context_reduced_potential)
if validate_energy_bookkeeping:
assert (energy_mismatch_ratio < ENERGY_MISMATCH_RATIO_THRESHOLD + 1) and (energy_mismatch_ratio > 1 - ENERGY_MISMATCH_RATIO_THRESHOLD) , f"The ratio of the calculated final energy to the true final energy is {energy_mismatch_ratio}"
# Final log proposal:
_logger.info("Final logp_proposal: {}".format(logp_proposal))
# Clean up OpenMM Context since garbage collector is sometimes slow
del context; del atoms_with_positions_context; del final_context
del integrator; del atoms_with_positions_system_integrator; del final_system_integrator
check_dimensionality(logp_proposal, float)
check_dimensionality(new_positions, unit.nanometers)
omitted_growth_terms = growth_system_generator.omitted_growth_terms
if self.use_sterics:
return logp_proposal, new_positions, rjmc_info, 0.0, reduced_potential_energy, [], omitted_growth_terms
return logp_proposal, new_positions, rjmc_info, atoms_with_positions_reduced_potential, final_context_reduced_potential, neglected_angle_terms, omitted_growth_terms
def _corrected_reduced_potential(self, growth_system_generator, positions, platform_name, atom_proposal_order, beta):
"""
        In order to compute the properly bookkept energy mismatch, we must define a growth system without the biasing torsions.
"""
import copy
from simtk import openmm
from perses.tests.utils import compute_potential_components
_integrator = openmm.VerletIntegrator(1*unit.femtoseconds)
growth_system = copy.deepcopy(growth_system_generator.get_modified_system())
#the last thing to do for bookkeeping is to delete the torsion force associated with the extra ring-closing and chirality restraints
#first, we see if there are two CustomTorsionForce objects...
custom_torsion_forces = [force_index for force_index in range(growth_system.getNumForces()) if growth_system.getForce(force_index).__class__.__name__ == 'CustomTorsionForce']
if len(custom_torsion_forces) == 2:
_logger.debug(f"\tfound 2 custom torsion forces")
#then the first one is the normal growth torsion force object and the second is the added torsion force object used to handle chirality and ring-closing constraints
growth_system.removeForce(max(custom_torsion_forces))
from perses.dispersed.utils import configure_platform
platform = configure_platform(platform_name, fallback_platform_name='Reference', precision='double')
mod_context = openmm.Context(growth_system, _integrator, platform)
growth_system_generator.set_growth_parameter_index(len(atom_proposal_order)+1, mod_context)
mod_context.setPositions(positions)
mod_state = mod_context.getState(getEnergy=True)
modified_reduced_potential_energy = beta * mod_state.getPotentialEnergy()
added_energy_components = [(force, energy) for force, energy in compute_potential_components(mod_context)]
print(f"added energy components: {added_energy_components}")
return modified_reduced_potential_energy
def _define_no_nb_system(self,
system,
neglected_angle_terms,
atom_proposal_order):
"""
        This is a quick internal function that generates a final system (neglecting nonbonded interactions and the specified valence terms)
        used in an assertion comparing the energy added during the geometry proposal with the energy of the final system.
Parameters
----------
        system : openmm.System object
            system of the target (from the topology proposal), which should include all valence, steric, and electrostatic terms
        neglected_angle_terms : list of ints
            list of HarmonicAngleForce indices corresponding to the neglected terms
        atom_proposal_order : list of ints
            indices of the atoms being placed; used to decide which 1,4 exceptions to retain
Returns
-------
        final_system : openmm.System object
final system for energy comparison
"""
import copy
from simtk import unit
no_nb_system = copy.deepcopy(system)
_logger.info("\tbeginning construction of no_nonbonded final system...")
_logger.info(f"\tinitial no-nonbonded final system forces {[force.__class__.__name__ for force in list(no_nb_system.getForces())]}")
num_forces = no_nb_system.getNumForces()
for index in reversed(range(num_forces)):
force = no_nb_system.getForce(index)
if force.__class__.__name__ == 'NonbondedForce' or force.__class__.__name__ == 'MonteCarloBarostat':
if self._use_14_nonbondeds and force.__class__.__name__ == 'NonbondedForce':
for particle_index in range(force.getNumParticles()):
[charge, sigma, epsilon] = force.getParticleParameters(particle_index)
force.setParticleParameters(particle_index, charge*0.0, sigma, epsilon*0.0)
for exception_index in range(force.getNumExceptions()):
p1, p2, chargeprod, sigma, epsilon = force.getExceptionParameters(exception_index)
                        if len(set(atom_proposal_order).intersection(set([p1, p2]))) == 0: # no growth (new) atom in this exception, so we zero out its electrostatics and sterics
force.setExceptionParameters(exception_index, p1, p2, chargeProd = chargeprod * 0.0, sigma = sigma, epsilon = epsilon * 0.0)
else:
no_nb_system.removeForce(index)
elif force.__class__.__name__ == 'HarmonicAngleForce':
num_angles = force.getNumAngles()
for angle_idx in neglected_angle_terms:
p1, p2, p3, theta0, K = force.getAngleParameters(angle_idx)
force.setAngleParameters(angle_idx, p1, p2, p3, theta0, unit.Quantity(value=0.0, unit=unit.kilojoule/(unit.mole*unit.radian**2)))
# #the last thing to do for bookkeeping is to delete the torsion force associated with the extra ring-closing and chirality restraints
#
# #first, we see if there are two CustomTorsionForce objects...
# custom_torsion_forces = [force_index for force_index in range(no_nb_system.getNumForces()) if no_nb_system.getForce(force_index).__class__.__name__ == 'CustomTorsionForce']
# if len(custom_torsion_forces) == 2:
# #then the first one is the normal growth torsion force object and the second is the added torsion force object used to handle chirality and ring-closing constraints
# no_nb_system.removeForce(max(custom_torsion_forces))
forces = { no_nb_system.getForce(index).__class__.__name__ : no_nb_system.getForce(index) for index in range(no_nb_system.getNumForces()) }
_logger.info(f"\tfinal no-nonbonded final system forces {forces.keys()}")
#bonds
bond_forces = forces['HarmonicBondForce']
_logger.info(f"\tthere are {bond_forces.getNumBonds()} bond forces in the no-nonbonded final system")
#angles
angle_forces = forces['HarmonicAngleForce']
_logger.info(f"\tthere are {angle_forces.getNumAngles()} angle forces in the no-nonbonded final system")
#torsions
torsion_forces = forces['PeriodicTorsionForce']
_logger.info(f"\tthere are {torsion_forces.getNumTorsions()} torsion forces in the no-nonbonded final system")
return no_nb_system
def _copy_positions(self, atoms_with_positions, top_proposal, current_positions):
"""
Copy the current positions to an array that will also hold new positions
Parameters
----------
atoms_with_positions : list of parmed.Atom
parmed Atom objects denoting atoms that currently have positions
top_proposal : topology_proposal.TopologyProposal
topology proposal object
current_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
Positions of the current system
Returns
-------
new_positions : simtk.unit.Quantity with shape (n_atoms, 3) with units compatible with nanometers
New positions for new topology object with known positions filled in
"""
check_dimensionality(current_positions, unit.nanometers)
# Create new positions
new_shape = [top_proposal.n_atoms_new, 3]
# Workaround for CustomAngleForce NaNs: Create random non-zero positions for new atoms.
new_positions = unit.Quantity(np.random.random(new_shape), unit=unit.nanometers)
# Copy positions for atoms that have them defined
for atom in atoms_with_positions:
old_index = top_proposal.new_to_old_atom_map[atom.idx]
new_positions[atom.idx] = current_positions[old_index]
check_dimensionality(new_positions, unit.nanometers)
return new_positions
def _get_relevant_bond(self, atom1, atom2):
"""
        Get parameters defining the bond connecting two atoms
Parameters
----------
atom1 : parmed.Atom
One of the atoms in the bond
atom2 : parmed.Atom object
The other atom in the bond
Returns
-------
bond : parmed.Bond with units modified to simtk.unit.Quantity
Bond connecting the two atoms, or None if constrained or no bond term exists.
Parameters representing unit-bearing quantities have been converted to simtk.unit.Quantity with units attached.
"""
bonds_1 = set(atom1.bonds)
bonds_2 = set(atom2.bonds)
relevant_bond_set = bonds_1.intersection(bonds_2)
relevant_bond = relevant_bond_set.pop()
if relevant_bond.type is None:
return None
relevant_bond_with_units = self._add_bond_units(relevant_bond)
check_dimensionality(relevant_bond_with_units.type.req, unit.nanometers)
check_dimensionality(relevant_bond_with_units.type.k, unit.kilojoules_per_mole/unit.nanometers**2)
return relevant_bond_with_units
def _get_bond_constraint(self, atom1, atom2, system):
"""
Get constraint parameters corresponding to the bond between the given atoms
Parameters
----------
atom1 : parmed.Atom
The first atom of the constrained bond
atom2 : parmed.Atom
The second atom of the constrained bond
system : openmm.System
The system containing the constraint
Returns
-------
constraint : simtk.unit.Quantity or None
If a constraint is defined between the two atoms, the length is returned; otherwise None
"""
# TODO: This algorithm is incredibly inefficient.
# Instead, generate a dictionary lookup of constrained distances.
atom_indices = set([atom1.idx, atom2.idx])
n_constraints = system.getNumConstraints()
constraint = None
for i in range(n_constraints):
p1, p2, length = system.getConstraintParameters(i)
constraint_atoms = set([p1, p2])
if len(constraint_atoms.intersection(atom_indices))==2:
constraint = length
if constraint is not None:
check_dimensionality(constraint, unit.nanometers)
return constraint
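    # Hedged sketch (not part of the original perses code): the dictionary-based lookup alluded to
    # in the TODO of _get_bond_constraint above. Building the table once makes repeated constraint
    # queries O(1) instead of scanning every constraint in the System.
    @staticmethod
    def _example_constraint_lookup_table(system):
        """Return a dict mapping frozenset({particle1, particle2}) to the constrained distance."""
        constraint_lengths = {}
        for index in range(system.getNumConstraints()):
            p1, p2, length = system.getConstraintParameters(index)
            constraint_lengths[frozenset((p1, p2))] = length
        return constraint_lengths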
def _get_relevant_angle(self, atom1, atom2, atom3):
"""
Get the angle containing the 3 given atoms
Parameters
----------
atom1 : parmed.Atom
The first atom defining the angle
atom2 : parmed.Atom
The second atom defining the angle
atom3 : parmed.Atom
The third atom in the angle
Returns
-------
        relevant_angle_with_units : parmed.Angle with parameters modified to be simtk.unit.Quantity
Angle connecting the three atoms
Parameters representing unit-bearing quantities have been converted to simtk.unit.Quantity with units attached.
"""
atom1_angles = set(atom1.angles)
atom2_angles = set(atom2.angles)
atom3_angles = set(atom3.angles)
relevant_angle_set = atom1_angles.intersection(atom2_angles, atom3_angles)
# DEBUG
if len(relevant_angle_set) == 0:
print('atom1_angles:')
print(atom1_angles)
print('atom2_angles:')
print(atom2_angles)
print('atom3_angles:')
print(atom3_angles)
raise Exception('Atoms %s-%s-%s do not share a parmed Angle term' % (atom1, atom2, atom3))
relevant_angle = relevant_angle_set.pop()
if type(relevant_angle.type.k) != unit.Quantity:
relevant_angle_with_units = self._add_angle_units(relevant_angle)
else:
relevant_angle_with_units = relevant_angle
check_dimensionality(relevant_angle.type.theteq, unit.radians)
check_dimensionality(relevant_angle.type.k, unit.kilojoules_per_mole/unit.radians**2)
return relevant_angle_with_units
def _add_bond_units(self, bond):
"""
Attach units to a parmed harmonic bond
Parameters
----------
bond : parmed.Bond
            The bond object whose parameters will be converted to unit-bearing quantities
Returns
-------
bond : parmed.Bond with units modified to simtk.unit.Quantity
The same modified Bond object that was passed in
Parameters representing unit-bearing quantities have been converted to simtk.unit.Quantity with units attached.
"""
# TODO: Shouldn't we be making a deep copy?
# If already promoted to unit-bearing quantities, return the object
if type(bond.type.k)==unit.Quantity:
return bond
# Add parmed units
# TODO: Get rid of this, and just operate on the OpenMM System instead
bond.type.req = unit.Quantity(bond.type.req, unit=unit.angstrom)
bond.type.k = unit.Quantity(2.0*bond.type.k, unit=unit.kilocalorie_per_mole/unit.angstrom**2)
return bond
def _add_angle_units(self, angle):
"""
Attach units to parmed harmonic angle
Parameters
----------
angle : parmed.Angle
            The angle object whose parameters will be converted to unit-bearing quantities
Returns
-------
angle : parmed.Angle with units modified to simtk.unit.Quantity
The same modified Angle object that was passed in
Parameters representing unit-bearing quantities have been converted to simtk.unit.Quantity with units attached.
"""
# TODO: Shouldn't we be making a deep copy?
# If already promoted to unit-bearing quantities, return the object
if type(angle.type.k)==unit.Quantity:
return angle
# Add parmed units
# TODO: Get rid of this, and just operate on the OpenMM System instead
angle.type.theteq = unit.Quantity(angle.type.theteq, unit=unit.degree)
angle.type.k = unit.Quantity(2.0*angle.type.k, unit=unit.kilocalorie_per_mole/unit.radian**2)
return angle
def _add_torsion_units(self, torsion):
"""
Add the correct units to a torsion
Parameters
----------
        torsion : parmed.Torsion
            The torsion object whose parameters will be converted to unit-bearing quantities
Returns
-------
torsion : parmed.Torsion with units modified to simtk.unit.Quantity
The same modified Torsion object that was passed in
Parameters representing unit-bearing quantities have been converted to simtk.unit.Quantity with units attached.
"""
# TODO: Shouldn't we be making a deep copy?
# If already promoted to unit-bearing quantities, return the object
if type(torsion.type.phi_k) == unit.Quantity:
return torsion
# Add parmed units
# TODO: Get rid of this, and just operate on the OpenMM System instead
torsion.type.phi_k = unit.Quantity(torsion.type.phi_k, unit=unit.kilocalorie_per_mole)
torsion.type.phase = unit.Quantity(torsion.type.phase, unit=unit.degree)
return torsion
def _rotation_matrix(self, axis, angle):
"""
Compute a rotation matrix about the origin given a coordinate axis and an angle.
Parameters
----------
axis : ndarray of shape (3,) without units
The axis about which rotation should occur
angle : float (implicitly in radians)
The angle of rotation about the axis
Returns
-------
rotation_matrix : ndarray of shape (3,3) without units
The 3x3 rotation matrix
"""
axis = axis/np.linalg.norm(axis)
axis_squared = np.square(axis)
cos_angle = np.cos(angle)
sin_angle = np.sin(angle)
rot_matrix_row_one = np.array([cos_angle+axis_squared[0]*(1-cos_angle),
axis[0]*axis[1]*(1-cos_angle) - axis[2]*sin_angle,
axis[0]*axis[2]*(1-cos_angle)+axis[1]*sin_angle])
rot_matrix_row_two = np.array([axis[1]*axis[0]*(1-cos_angle)+axis[2]*sin_angle,
cos_angle+axis_squared[1]*(1-cos_angle),
axis[1]*axis[2]*(1-cos_angle) - axis[0]*sin_angle])
rot_matrix_row_three = np.array([axis[2]*axis[0]*(1-cos_angle)-axis[1]*sin_angle,
axis[2]*axis[1]*(1-cos_angle)+axis[0]*sin_angle,
cos_angle+axis_squared[2]*(1-cos_angle)])
rotation_matrix = np.array([rot_matrix_row_one, rot_matrix_row_two, rot_matrix_row_three])
return rotation_matrix
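    # Hedged sanity-check sketch (not part of the original perses code): the matrix produced by
    # _rotation_matrix above is a proper rotation, so R R^T = I and det(R) = 1. The default axis
    # and angle are arbitrary illustrative values.
    def _example_rotation_matrix_properties(self, axis=None, angle=np.pi / 3.0):
        """Verify orthogonality and unit determinant of the rotation matrix."""
        axis = np.array([0.0, 0.0, 1.0]) if axis is None else axis
        rotation_matrix = self._rotation_matrix(axis, angle)
        assert np.allclose(rotation_matrix.dot(rotation_matrix.T), np.eye(3))
        assert np.isclose(np.linalg.det(rotation_matrix), 1.0)
        return rotation_matrix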
def _cartesian_to_internal(self, atom_position, bond_position, angle_position, torsion_position):
"""
Cartesian to internal coordinate conversion
Parameters
----------
        atom_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
            Position of atom whose internal coordinates are to be computed with respect to other atoms
        bond_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
            Position of atom separated from newly placed atom with bond length ``r``
        angle_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
            Position of atom separated from newly placed atom with angle ``theta``
        torsion_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
            Position of atom separated from newly placed atom with torsion ``phi``
Returns
-------
internal_coords : tuple of (float, float, float)
Tuple representing (r, theta, phi):
r : float (implicitly in nanometers)
Bond length distance from ``bond_position`` to newly placed atom
theta : float (implicitly in radians on domain [0,pi])
Angle formed by ``(angle_position, bond_position, new_atom)``
phi : float (implicitly in radians on domain [-pi, +pi))
Torsion formed by ``(torsion_position, angle_position, bond_position, new_atom)``
detJ : float
The absolute value of the determinant of the Jacobian transforming from (r,theta,phi) to (x,y,z)
.. todo :: Clarify the direction of the Jacobian
"""
# TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs
check_dimensionality(atom_position, unit.nanometers)
check_dimensionality(bond_position, unit.nanometers)
check_dimensionality(angle_position, unit.nanometers)
check_dimensionality(torsion_position, unit.nanometers)
# Convert to internal coordinates once everything is dimensionless
# Make sure positions are float64 arrays implicitly in units of nanometers for numba
from perses.rjmc import coordinate_numba
internal_coords = coordinate_numba.cartesian_to_internal(
atom_position.value_in_unit(unit.nanometers).astype(np.float64),
bond_position.value_in_unit(unit.nanometers).astype(np.float64),
angle_position.value_in_unit(unit.nanometers).astype(np.float64),
torsion_position.value_in_unit(unit.nanometers).astype(np.float64))
# Return values are also in floating point implicitly in nanometers and radians
r, theta, phi = internal_coords
# Compute absolute value of determinant of Jacobian
detJ = np.abs(r**2*np.sin(theta))
check_dimensionality(r, float)
check_dimensionality(theta, float)
check_dimensionality(phi, float)
check_dimensionality(detJ, float)
return internal_coords, detJ
def _internal_to_cartesian(self, bond_position, angle_position, torsion_position, r, theta, phi):
"""
Calculate the cartesian coordinates of a newly placed atom in terms of internal coordinates,
along with the absolute value of the determinant of the Jacobian.
Parameters
----------
        bond_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
            Position of atom separated from newly placed atom with bond length ``r``
        angle_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
            Position of atom separated from newly placed atom with angle ``theta``
        torsion_position : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
            Position of atom separated from newly placed atom with torsion ``phi``
        r : float (implicitly in nanometers)
            Bond length distance from ``bond_position`` to newly placed atom
        theta : float (implicitly in radians)
            Angle formed by ``(angle_position, bond_position, new_atom)``
        phi : float (implicitly in radians)
            Torsion formed by ``(torsion_position, angle_position, bond_position, new_atom)``
Returns
-------
xyz : simtk.unit.Quantity wrapped numpy array of shape (3,) with units compatible with nanometers
The position of the newly placed atom
detJ : float
The absolute value of the determinant of the Jacobian transforming from (r,theta,phi) to (x,y,z)
.. todo :: Clarify the direction of the Jacobian
"""
# TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs
check_dimensionality(bond_position, unit.nanometers)
check_dimensionality(angle_position, unit.nanometers)
check_dimensionality(torsion_position, unit.nanometers)
check_dimensionality(r, float)
check_dimensionality(theta, float)
check_dimensionality(phi, float)
# Compute Cartesian coordinates from internal coordinates using all-dimensionless quantities
        # All inputs to numba must be float64 arrays implicitly in md_unit_system units (nanometers and radians)
from perses.rjmc import coordinate_numba
xyz = coordinate_numba.internal_to_cartesian(
bond_position.value_in_unit(unit.nanometers).astype(np.float64),
angle_position.value_in_unit(unit.nanometers).astype(np.float64),
torsion_position.value_in_unit(unit.nanometers).astype(np.float64),
np.array([r, theta, phi], np.float64))
# Transform position of new atom back into unit-bearing Quantity
xyz = unit.Quantity(xyz, unit=unit.nanometers)
# Compute abs det Jacobian using unitless values
detJ = np.abs(r**2*np.sin(theta))
check_dimensionality(xyz, unit.nanometers)
check_dimensionality(detJ, float)
return xyz, detJ
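    # Hedged round-trip sketch (not part of the original perses code): converting a Cartesian
    # position to internal coordinates and back should reproduce the original position, and both
    # directions report the same Jacobian factor |det J| = r^2 sin(theta). All four positions are
    # assumed to be simtk.unit.Quantity arrays of shape (3,) in units compatible with nanometers.
    def _example_internal_coordinate_round_trip(self, atom_position, bond_position, angle_position, torsion_position):
        """Convert Cartesian -> internal -> Cartesian and return both Jacobian factors."""
        (r, theta, phi), detJ_forward = self._cartesian_to_internal(atom_position, bond_position, angle_position, torsion_position)
        xyz, detJ_reverse = self._internal_to_cartesian(bond_position, angle_position, torsion_position, r, theta, phi)
        # xyz should agree with atom_position to numerical precision; detJ_forward equals detJ_reverse by construction.
        return xyz, detJ_forward, detJ_reverse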
def _bond_log_pmf(self, bond, beta, n_divisions):
r"""
Calculate the log probability mass function (PMF) of drawing a bond.
.. math ::
p(r; \beta, K_r, r_0) \propto r^2 e^{-\frac{\beta K_r}{2} (r - r_0)^2 }
Parameters
----------
bond : parmed.Structure.Bond modified to use simtk.unit.Quantity
Valence bond parameters
beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
Inverse thermal energy
n_divisions : int
            Number of quadrature points for drawing the bond length
Returns
-------
r_i : np.ndarray of shape (n_divisions,) implicitly in units of nanometers
            r_i[i] is the left edge of bond-length bin i, with corresponding log probability mass log_p_i[i]
log_p_i : np.ndarray of shape (n_divisions,)
log_p_i[i] is the corresponding log probability mass of bond length r_i[i]
bin_width : float implicitly in units of nanometers
The bin width for individual PMF bins
.. todo :: In future, this approach will be improved by eliminating discrete quadrature.
"""
# TODO: Overhaul this method to accept and return unit-bearing quantities
# TODO: We end up computing the discretized PMF over and over again; we can speed this up by caching
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
# Check input argument dimensions
assert check_dimensionality(bond.type.req, unit.angstroms)
assert check_dimensionality(bond.type.k, unit.kilojoules_per_mole/unit.nanometers**2)
assert check_dimensionality(beta, unit.kilojoules_per_mole**(-1))
# Retrieve relevant quantities for valence bond
r0 = bond.type.req # equilibrium bond distance, unit-bearing quantity
k = bond.type.k * self._bond_softening_constant # force constant, unit-bearing quantity
sigma_r = unit.sqrt((1.0/(beta*k))) # standard deviation, unit-bearing quantity
# Convert to dimensionless quantities in MD unit system
r0 = r0.value_in_unit_system(unit.md_unit_system)
k = k.value_in_unit_system(unit.md_unit_system)
sigma_r = sigma_r.value_in_unit_system(unit.md_unit_system)
# Determine integration bounds
lower_bound, upper_bound = max(0., r0 - 6*sigma_r), (r0 + 6*sigma_r)
# Compute integration quadrature points
r_i, bin_width = np.linspace(lower_bound, upper_bound, num=n_divisions, retstep=True, endpoint=False)
# Form log probability
from scipy.special import logsumexp
log_p_i = 2*np.log(r_i+(bin_width/2.0)) - 0.5*((r_i+(bin_width/2.0)-r0)/sigma_r)**2
log_p_i -= logsumexp(log_p_i)
check_dimensionality(r_i, float)
check_dimensionality(log_p_i, float)
check_dimensionality(bin_width, float)
return r_i, log_p_i, bin_width
def _bond_logp(self, r, bond, beta, n_divisions):
r"""
Calculate the log-probability of a given bond at a given inverse temperature
        The bond length r is assumed to be drawn from the distribution
.. math ::
r \sim p(r; \beta, K_r, r_0) \propto r^2 e^{-\frac{\beta K_r}{2} (r - r_0)^2 }
Parameters
----------
r : float
bond length, implicitly in nanometers
bond : parmed.Structure.Bond modified to use simtk.unit.Quantity
Valence bond parameters
beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
Inverse thermal energy
n_divisions : int
            Number of quadrature points for drawing the bond length
        Returns
        -------
        logp : float
            The log probability of the bond length (as a density, corrected for the bin width)
        .. todo :: In future, this approach will be improved by eliminating discrete quadrature.
"""
# TODO: Overhaul this method to accept and return unit-bearing quantities
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
check_dimensionality(r, float)
check_dimensionality(beta, 1/unit.kilojoules_per_mole)
r_i, log_p_i, bin_width = self._bond_log_pmf(bond, beta, n_divisions)
if (r < r_i[0]) or (r >= r_i[-1] + bin_width):
return LOG_ZERO
# Determine index that r falls within
index = int((r - r_i[0])/bin_width)
assert (index >= 0) and (index < n_divisions)
# Correct for division size
logp = log_p_i[index] - np.log(bin_width)
return logp
def _propose_bond(self, bond, beta, n_divisions):
r"""
Propose dimensionless bond length r from distribution
.. math ::
r \sim p(r; \beta, K_r, r_0) \propto r^2 e^{-\frac{\beta K_r}{2} (r - r_0)^2 }
Parameters
----------
bond : parmed.Structure.Bond modified to use simtk.unit.Quantity
Valence bond parameters
beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
Inverse thermal energy
n_divisions : int
            Number of quadrature points for drawing the bond length
Returns
-------
r : float
Dimensionless bond length, implicitly in nanometers
.. todo :: In future, this approach will be improved by eliminating discrete quadrature.
"""
# TODO: Overhaul this method to accept and return unit-bearing quantities
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
check_dimensionality(beta, 1/unit.kilojoules_per_mole)
r_i, log_p_i, bin_width = self._bond_log_pmf(bond, beta, n_divisions)
# Draw an index
index = np.random.choice(range(n_divisions), p=np.exp(log_p_i))
r = r_i[index]
# Draw uniformly in that bin
r = np.random.uniform(r, r+bin_width)
# Return dimensionless r, implicitly in nanometers
assert check_dimensionality(r, float)
assert (r > 0)
return r
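    # Hedged usage sketch (not part of the original perses code): _propose_bond and _bond_logp
    # share the same discretized PMF, so a bond length drawn by the former can be scored by the
    # latter. `bond` is assumed to be a parmed bond that has already been passed through
    # _add_bond_units (e.g. the return value of _get_relevant_bond).
    def _example_bond_sample_and_score(self, bond, beta, n_divisions=1000):
        """Draw a dimensionless bond length and return it with its log probability density."""
        r = self._propose_bond(bond, beta, n_divisions)       # implicitly in nanometers
        logp_r = self._bond_logp(r, bond, beta, n_divisions)  # log PMF corrected for bin width
        return r, logp_r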
def _angle_log_pmf(self, angle, beta, n_divisions):
r"""
Calculate the log probability mass function (PMF) of drawing a angle.
.. math ::
p(\theta; \beta, K_\theta, \theta_0) \propto \sin(\theta) e^{-\frac{\beta K_\theta}{2} (\theta - \theta_0)^2 }
Parameters
----------
angle : parmed.Structure.Angle modified to use simtk.unit.Quantity
            Valence angle parameters
beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
Inverse thermal energy
n_divisions : int
            Number of quadrature points for drawing the angle
Returns
-------
theta_i : np.ndarray of shape (n_divisions,) implicitly in units of radians
            theta_i[i] is the left edge of angle bin i, with corresponding log probability mass log_p_i[i]
log_p_i : np.ndarray of shape (n_divisions,)
log_p_i[i] is the corresponding log probability mass of angle theta_i[i]
bin_width : float implicitly in units of radians
The bin width for individual PMF bins
.. todo :: In future, this approach will be improved by eliminating discrete quadrature.
"""
# TODO: Overhaul this method to accept unit-bearing quantities
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
# TODO: We end up computing the discretized PMF over and over again; we can speed this up by caching
# Check input argument dimensions
assert check_dimensionality(angle.type.theteq, unit.radians)
assert check_dimensionality(angle.type.k, unit.kilojoules_per_mole/unit.radians**2)
assert check_dimensionality(beta, unit.kilojoules_per_mole**(-1))
# Retrieve relevant quantities for valence angle
theta0 = angle.type.theteq
k = angle.type.k * self._angle_softening_constant
sigma_theta = unit.sqrt(1.0/(beta * k)) # standard deviation, unit-bearing quantity
# Convert to dimensionless quantities in MD unit system
theta0 = theta0.value_in_unit_system(unit.md_unit_system)
k = k.value_in_unit_system(unit.md_unit_system)
sigma_theta = sigma_theta.value_in_unit_system(unit.md_unit_system)
# Determine integration bounds
# We can't compute log(0) so we have to avoid sin(theta) = 0 near theta = {0, pi}
EPSILON = 1.0e-3
lower_bound, upper_bound = EPSILON, np.pi-EPSILON
# Compute left bin edges
theta_i, bin_width = np.linspace(lower_bound, upper_bound, num=n_divisions, retstep=True, endpoint=False)
# Compute log probability
from scipy.special import logsumexp
log_p_i = np.log(np.sin(theta_i+(bin_width/2.0))) - 0.5*((theta_i+(bin_width/2.0)-theta0)/sigma_theta)**2
log_p_i -= logsumexp(log_p_i)
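# The density is evaluated at bin midpoints (theta_i + bin_width/2) and normalized with logsumexp
# for numerical stability, yielding a discrete PMF over the angle bins.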
check_dimensionality(theta_i, float)
check_dimensionality(log_p_i, float)
check_dimensionality(bin_width, float)
return theta_i, log_p_i, bin_width
def _angle_logp(self, theta, angle, beta, n_divisions):
r"""
Calculate the log-probability of a given angle at a given inverse temperature
The angle theta is assumed to be drawn from the distribution
.. math ::
p(\theta; \beta, K_\theta, \theta_0) \propto \sin(\theta) e^{-\frac{\beta K_\theta}{2} (\theta - \theta_0)^2 }
Parameters
----------
theta : float
angle, implicitly in radians
angle : parmed.Structure.Angle modified to use simtk.unit.Quantity
Valence angle parameters
beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
Inverse thermal energy
n_divisions : int
Number of quadrature points for drawing the angle
.. todo :: In future, this approach will be improved by eliminating discrete quadrature.
"""
# TODO: Overhaul this method to accept unit-bearing quantities
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
check_dimensionality(theta, float)
check_dimensionality(beta, 1/unit.kilojoules_per_mole)
theta_i, log_p_i, bin_width = self._angle_log_pmf(angle, beta, n_divisions)
if (theta < theta_i[0]) or (theta >= theta_i[-1] + bin_width):
return LOG_ZERO
# Determine index that theta falls within
index = int((theta - theta_i[0]) / bin_width)
assert (index >= 0) and (index < n_divisions)
# Correct for division size
logp = log_p_i[index] - np.log(bin_width)
return logp
def _propose_angle(self, angle, beta, n_divisions):
r"""
Propose dimensionless angle from distribution
.. math ::
\theta \sim p(\theta; \beta, K_\theta, \theta_0) \propto \sin(\theta) e^{-\frac{\beta K_\theta}{2} (\theta - \theta_0)^2 }
Parameters
----------
angle : parmed.Structure.Angle modified to use simtk.unit.Quantity
Valence angle parameters
beta : simtk.unit.Quantity with units compatible with 1/kilojoules_per_mole
Inverse thermal energy
n_divisions : int
Number of quadrature points for drawing the angle
Returns
-------
theta : float
Dimensionless valence angle, implicitly in radians
.. todo :: In future, this approach will be improved by eliminating discrete quadrature.
"""
# TODO: Overhaul this method to accept and return unit-bearing quantities
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
check_dimensionality(beta, 1/unit.kilojoules_per_mole)
theta_i, log_p_i, bin_width = self._angle_log_pmf(angle, beta, n_divisions)
# Draw an index
index = np.random.choice(range(n_divisions), p=np.exp(log_p_i))
theta = theta_i[index]
# Draw uniformly in that bin
theta = np.random.uniform(theta, theta+bin_width)
# Return dimensionless theta, implicitly in radians
assert check_dimensionality(theta, float)
return theta
def _torsion_scan(self, torsion_atom_indices, positions, r, theta, n_divisions):
"""
Compute unit-bearing Cartesian positions and torsions (dimensionless, in md_unit_system) for a torsion scan
Parameters
----------
torsion_atom_indices : int tuple of shape (4,)
Atom indices defining torsion, where torsion_atom_indices[0] is the atom to be driven
positions : simtk.unit.Quantity of shape (natoms,3) with units compatible with nanometers
Positions of the atoms in the system
r : float (implicitly in md_unit_system)
Dimensionless bond length (must be in nanometers)
theta : float (implicitly in md_unit_system)
Dimensionless valence angle (must be in radians)
n_divisions : int
The number of divisions for the torsion scan
Returns
-------
xyzs : simtk.unit.Quantity wrapped np.ndarray of shape (n_divisions,3) with dimensions length
The Cartesian coordinates of the driven atom at each torsion value in the scan
phis : np.ndarray of shape (n_divisions,), implicitly in radians
The torsions angles representing the left bin edge at which a potential will be calculated
bin_width : float, implicitly in radians
The bin width of torsion scan increment
"""
# TODO: Overhaul this method to accept and return unit-bearing quantities
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
assert check_dimensionality(positions, unit.angstroms)
assert check_dimensionality(r, float)
assert check_dimensionality(theta, float)
# Compute dimensionless positions in md_unit_system as numba-friendly float64
length_unit = unit.nanometers
import copy
positions_copy = copy.deepcopy(positions)
positions_copy = positions_copy.value_in_unit(length_unit).astype(np.float64)
atom_positions, bond_positions, angle_positions, torsion_positions = [ positions_copy[index] for index in torsion_atom_indices ]
# Compute dimensionless torsion values for torsion scan
phis, bin_width = np.linspace(-np.pi, +np.pi, num=n_divisions, retstep=True, endpoint=False)
# Compute dimensionless positions for torsion scan
from perses.rjmc import coordinate_numba
internal_coordinates = np.array([r, theta, 0.0], np.float64)
xyzs = coordinate_numba.torsion_scan(bond_positions, angle_positions, torsion_positions, internal_coordinates, phis)
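# The numba helper places the driven atom in Cartesian space for each torsion value phi, using the
# internal coordinates (r, theta, phi) relative to the bond, angle, and torsion anchor atoms
# (positions implicitly in nanometers).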
# Convert positions back into standard md_unit_system length units (nanometers)
xyzs_quantity = unit.Quantity(xyzs, unit=unit.nanometers)
# Return unit-bearing positions and dimensionless torsions (implicitly in md_unit_system)
check_dimensionality(xyzs_quantity, unit.nanometers)
check_dimensionality(phis, float)
return xyzs_quantity, phis, bin_width
def _torsion_log_pmf(self, growth_context, torsion_atom_indices, positions, r, theta, beta, n_divisions):
"""
Calculate the torsion log probability using OpenMM, including all energetic contributions for the atom being driven
This includes all contributions from bonds, angles, and torsions for the atom being placed
(and, optionally, sterics if added to the growth system when it was created).
Parameters
----------
growth_context : simtk.openmm.Context
Context containing the modified system
torsion_atom_indices : int tuple of shape (4,)
Atom indices defining torsion, where torsion_atom_indices[0] is the atom to be driven
positions : simtk.unit.Quantity with shape (natoms,3) with units compatible with nanometers
Positions of the atoms in the system
r : float (implicitly in nanometers)
Dimensionless bond length (must be in nanometers)
theta : float (implicitly in radians on domain [0,+pi])
Dimensionless valence angle (must be in radians)
beta : simtk.unit.Quantity with units compatible with 1/(kJ/mol)
Inverse thermal energy
n_divisions : int
Number of divisions for the torsion scan
Returns
-------
logp_torsions : np.ndarray of float with shape (n_divisions,)
logp_torsions[i] is the normalized log probability mass of the torsion bin whose left edge is phis[i]
phis : np.ndarray of float with shape (n_divisions,), implicitly in radians
phis[i] is the left bin edge of the torsion angle at which the log probability logp_torsions[i] was calculated
bin_width : float, implicitly in radians
The bin width for torsions
.. todo :: In future, this approach will be improved by eliminating discrete quadrature.
"""
# TODO: This method could benefit from memoization to speed up tests and particle filtering
# TODO: Overhaul this method to accept and return unit-bearing quantities
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
check_dimensionality(positions, unit.angstroms)
check_dimensionality(r, float)
check_dimensionality(theta, float)
check_dimensionality(beta, 1.0 / unit.kilojoules_per_mole)
# Compute energies for all torsions
logq = np.zeros(n_divisions) # logq[i] is the log unnormalized torsion probability density
atom_idx = torsion_atom_indices[0]
xyzs, phis, bin_width = self._torsion_scan(torsion_atom_indices, positions, r, theta, n_divisions)
xyzs = xyzs.value_in_unit_system(unit.md_unit_system) # make positions dimensionless again
positions = positions.value_in_unit_system(unit.md_unit_system)
for i, xyz in enumerate(xyzs):
# Set positions
positions[atom_idx,:] = xyz
growth_context.setPositions(positions)
# Compute potential energy
state = growth_context.getState(getEnergy=True)
potential_energy = state.getPotentialEnergy()
# Store unnormalized log probabilities
logq_i = -beta*potential_energy
logq[i] = logq_i
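# beta carries units of 1/(kJ/mol) and potential_energy carries kJ/mol, so -beta*potential_energy is a
# dimensionless (unnormalized) log Boltzmann weight for this torsion bin.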
# It's OK to have a few torsions with NaN energies,
# but we need at least _some_ torsions to have finite energies
if np.sum(np.isnan(logq)) == n_divisions:
raise Exception("All %d torsion energies in torsion PMF are NaN." % n_divisions)
# Suppress the contribution from any torsions with NaN energies
logq[np.isnan(logq)] = -np.inf
# Compute the normalized log probability
from scipy.special import logsumexp
logp_torsions = logq - logsumexp(logq)
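# Normalizing the Boltzmann weights with logsumexp makes sum(exp(logp_torsions)) == 1,
# turning the unnormalized log weights into a discrete PMF over torsion bins.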
# Write proposed torsion energies to a PDB file for visualization or debugging, if desired
if hasattr(self, '_proposal_pdbfile'):
# Write proposal probabilities to PDB file as B-factors for inert atoms
f_i = -logp_torsions
f_i -= f_i.min() # minimum free energy is zero
f_i[f_i > 999.99] = 999.99
self._proposal_pdbfile.write('MODEL\n')
for i, xyz in enumerate(xyzs):
self._proposal_pdbfile.write('ATOM %5d %4s %3s %c%4d %8.3f%8.3f%8.3f%6.2f%6.2f\n' % (i+1, ' Ar ', 'Ar ', ' ', atom_idx+1, 10*xyz[0], 10*xyz[1], 10*xyz[2], np.exp(logp_torsions[i]), f_i[i]))
self._proposal_pdbfile.write('TER\n')
self._proposal_pdbfile.write('ENDMDL\n')
# TODO: Write proposal PMFs to storage
# atom_proposal_indices[order]
# atom_positions[order,k]
# torsion_pmf[order, division_index]
assert check_dimensionality(logp_torsions, float)
assert check_dimensionality(phis, float)
assert check_dimensionality(bin_width, float)
return logp_torsions, phis, bin_width
def _propose_torsion(self, growth_context, torsion_atom_indices, positions, r, theta, beta, n_divisions):
"""
Propose a torsion angle using OpenMM
Parameters
----------
growth_context : simtk.openmm.Context
Context containing the modified system
torsion_atom_indices : int tuple of shape (4,)
Atom indices defining torsion, where torsion_atom_indices[0] is the atom to be driven
positions : simtk.unit.Quantity with shape (natoms,3) with units compatible with nanometers
Positions of the atoms in the system
r : float (implicitly in nanometers)
Dimensionless bond length (must be in nanometers)
theta : float (implicitly in radians on domain [0,+pi])
Dimensionless valence angle (must be in radians)
beta : simtk.unit.Quantity with units compatible with 1/(kJ/mol)
Inverse thermal energy
n_divisions : int
Number of divisions for the torsion scan
Returns
-------
phi : float, implicitly in radians
The proposed torsion angle
logp : float
The log probability of the proposal
.. todo :: In future, this approach will be improved by eliminating discrete quadrature.
"""
# TODO: Overhaul this method to accept and return unit-bearing quantities
# TODO: Switch from simple discrete quadrature to more sophisticated computation of pdf
check_dimensionality(positions, unit.angstroms)
check_dimensionality(r, float)
check_dimensionality(theta, float)
check_dimensionality(beta, 1.0 / unit.kilojoules_per_mole)
# Compute probability mass function for all possible proposed torsions
logp_torsions, phis, bin_width = self._torsion_log_pmf(growth_context, torsion_atom_indices, positions, r, theta, beta, n_divisions)
# Draw a torsion bin and a torsion uniformly within that bin
index = np.random.choice(range(len(phis)), p=np.exp(logp_torsions))
phi = phis[index]
logp = logp_torsions[index]
# Draw uniformly within the bin
phi = np.random.uniform(phi, phi+bin_width)
logp -= np.log(bin_width)
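# As with bonds and angles, subtracting log(bin_width) converts the bin probability mass into a
# probability density for the continuous torsion angle.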
assert check_dimensionality(phi, float)
assert check_dimensionality(logp, float)
return phi, logp
def _torsion_logp(self, growth_context, torsion_atom_indices, positions, r, theta, phi, beta, n_divisions):
"""
Calculate the logp of a torsion using OpenMM
Parameters
----------
growth_context : simtk.openmm.Context
Context containing the modified system
torsion_atom_indices : int tuple of shape (4,)
Atom indices defining torsion, where torsion_atom_indices[0] is the atom to be driven
positions : simtk.unit.Quantity with shape (natoms,3) with units compatible with nanometers
Positions of the atoms in the system
r : float (implicitly in nanometers)
Dimensionless bond length (must be in nanometers)
theta : float (implicitly in radians on domain [0,+pi])
Dimensionless valence angle (must be in radians)
phi : float (implicitly in radians on domain [-pi,+pi))
Dimensionless torsion angle (must be in radians)
beta : simtk.unit.Quantity with units compatible with 1/(kJ/mol)
Inverse thermal energy
n_divisions : int
Number of divisions for the torsion scan
Returns
-------
torsion_logp : float
The log probability this torsion would be drawn
"""
# TODO: Overhaul this method to accept and return unit-bearing quantities
# Check that quantities are unitless
check_dimensionality(positions, unit.angstroms)
check_dimensionality(r, float)
check_dimensionality(theta, float)
check_dimensionality(phi, float)
check_dimensionality(beta, 1.0 / unit.kilojoules_per_mole)
# Compute torsion probability mass function
logp_torsions, phis, bin_width = self._torsion_log_pmf(growth_context, torsion_atom_indices, positions, r, theta, beta, n_divisions)
# Determine which bin the torsion falls within
index = np.argmin(np.abs(phi-phis)) # WARNING: This assumes both phi and phis have domain of [-pi,+pi)
# Convert from probability mass function to probability density function so that sum(dphi*p) = 1, with dphi = (2*pi)/n_divisions.
torsion_logp = logp_torsions[index] - np.log(bin_width)
assert check_dimensionality(torsion_logp, float)
return torsion_logp
class GeometrySystemGenerator(object):
"""
Internal utility class to generate OpenMM systems with only valence terms and special parameters for newly placed atoms to assist in geometry proposals.
The resulting system will have the specified global context parameter (controlled by ``parameter_name``)
that selects which proposed atom will have all its valence terms activated. When this parameter is set to the
index of the atom being added within ``growth_indices``, all valence terms associated with that atom will be computed.
Only valence terms involving newly placed atoms will be computed; valence terms between fixed atoms will be omitted.
"""
def __init__(self,
reference_system,
torsion_proposal_order,
omitted_bonds,
reference_topology,
global_parameter_name='growth_index',
add_extra_torsions = True,
add_extra_angles = False,
use_sterics=False,
force_names=None,
force_parameters=None,
verbose=True,
neglect_angles = True,
use_14_nonbondeds = True):
"""
Parameters
----------
reference_system : simtk.openmm.System object
The system containing the relevant forces and particles
torsion_proposal_order : list of list of 4-int
The order in which the torsion indices will be proposed
omitted_bonds : list of tuple of int
list of atom index tuples (corresponding to reference_topology atoms) which have been omitted in the atom proposal
reference_topology : simtk.openmm.topology.Topology (augmented)
used to probe the topology for rotamers, chiral centers, etc.
global_parameter_name : str, optional, default='growth_index'
The name of the global context parameter
add_extra_torsions : bool, optional
Whether to add additional torsions to keep rings flat. Default true.
force_names : list of str
A list of the names of forces that will be included in this system
force_parameters : dict
Options for the forces (e.g., NonbondedMethod : 'CutoffNonPeriodic')
neglect_angles : bool, optional, default=True
whether to neglect (and report on) coupled angle terms that would add variance to the work (within a numerical tolerance threshold)
verbose : bool, optional, default=True
If True, will print verbose output.
use_14_nonbondeds : bool, default True
whether to consider 1,4 exception interactions in the geometry proposal
Attributes
----------
growth_system : simtk.openmm.System object
The system containing all of the valence forces to be added (excluding neglected angle forces when neglect_angles == True), defined with respect
to the reference_system.
atoms_with_positions_system : simtk.openmm.System object
The system containing all of the core atom valence forces. This is to be used in the proposal to assert that the final growth_system energy plus
the atoms_with_positions_system energy is equal to the final_system energy (for the purpose of energy bookkeeping).
neglected_angle_terms : list of ints
The indices of the HarmonicAngleForce parameters which are neglected for the purpose of minimizing work variance. This will be empty if neglect_angles == False.
"""
import copy
# TODO: Rename `growth_indices` (which is really a list of Atom objects) to `atom_growth_order` or `atom_addition_order`
#create a record of omitted growth terms
self.omitted_growth_terms = {'bonds': [], 'angles': [], 'torsions': [], '1,4s': []}
self.omitted_bonds = omitted_bonds
self.extra_torsion_terms = {}
self.extra_angle_terms = {}
self.reference_topology = reference_topology
# Check that we're not using the reserved name
if global_parameter_name == 'growth_idx':
raise ValueError('global_parameter_name cannot be "growth_idx" due to naming collisions')
growth_indices = [ torsion[0] for torsion in torsion_proposal_order ]
default_growth_index = len(growth_indices) # default value of growth index to use in System that is returned
self.current_growth_index = default_growth_index
# Bonds, angles, and torsions
self._HarmonicBondForceEnergy = "select(step({}+0.1 - growth_idx), (K/2)*(r-r0)^2, 0);"
self._HarmonicAngleForceEnergy = "select(step({}+0.1 - growth_idx), (K/2)*(theta-theta0)^2, 0);"
self._PeriodicTorsionForceEnergy = "select(step({}+0.1 - growth_idx), k*(1+cos(periodicity*theta-phase)), 0);"
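# Each custom energy expression is gated by select(step(g + 0.1 - growth_idx), E, 0), where g is the
# global growth parameter and growth_idx is a per-term parameter: the term contributes E only once the
# global parameter has reached that term's growth index. For example (illustrative), with g = 3 a bond
# tagged growth_idx = 2 gives step(3.1 - 2) = 1 and is active, while one tagged growth_idx = 5 contributes 0.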
# Nonbonded sterics and electrostatics.
# TODO: Allow user to select whether electrostatics or sterics components are included in the nonbonded interaction energy.
self._nonbondedEnergy = "select(step({}+0.1 - growth_idx), U_sterics + U_electrostatics, 0);"
self._nonbondedEnergy += "growth_idx = max(growth_idx1, growth_idx2);"
# Sterics
from openmmtools.constants import ONE_4PI_EPS0 # OpenMM constant for Coulomb interactions (implicitly in md_unit_system units)
# TODO: Auto-detect combining rules to allow this to work with other force fields?
# TODO: Enable more flexible handling / metaprogramming of CustomForce objects?
self._nonbondedEnergy += "U_sterics = 4*epsilon*x*(x-1.0); x = (sigma/r)^6;"
self._nonbondedEnergy += "epsilon = sqrt(epsilon1*epsilon2); sigma = 0.5*(sigma1 + sigma2);"
# Electrostatics
self._nonbondedEnergy += "U_electrostatics = ONE_4PI_EPS0*charge1*charge2/r;"
self._nonbondedEnergy += "ONE_4PI_EPS0 = %f;" % ONE_4PI_EPS0
# Exceptions
self._nonbondedExceptionEnergy = "select(step({}+0.1 - growth_idx), U_exception, 0);"
self._nonbondedExceptionEnergy += "U_exception = ONE_4PI_EPS0*chargeprod/r + 4*epsilon*x*(x-1.0); x = (sigma/r)^6;"
self._nonbondedExceptionEnergy += "ONE_4PI_EPS0 = %f;" % ONE_4PI_EPS0
self.sterics_cutoff_distance = 9.0 * unit.angstroms # cutoff for steric interactions with added/deleted atoms
self.verbose = verbose
# Get list of particle indices for new and old atoms.
new_particle_indices = growth_indices
old_particle_indices = [idx for idx in range(reference_system.getNumParticles()) if idx not in new_particle_indices]
# Compile index of reference forces
reference_forces = dict()
reference_forces_indices = dict()
for (index, force) in enumerate(reference_system.getForces()):
force_name = force.__class__.__name__
if force_name in reference_forces:
raise ValueError('reference_system has two {} objects. This is currently unsupported.'.format(force_name))
else:
reference_forces_indices[force_name] = index
reference_forces[force_name] = force
# Create new System
from simtk import openmm
growth_system = openmm.System()
atoms_with_positions_system = copy.deepcopy(reference_system)
# Copy particles
for i in range(reference_system.getNumParticles()):
growth_system.addParticle(reference_system.getParticleMass(i))
# Virtual sites are, in principle, automatically supported
# Create bond force
_logger.info("\tcreating bond force...")
modified_bond_force = openmm.CustomBondForce(self._HarmonicBondForceEnergy.format(global_parameter_name))
modified_bond_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['r0', 'K', 'growth_idx']:
modified_bond_force.addPerBondParameter(parameter_name)
growth_system.addForce(modified_bond_force)
reference_bond_force = reference_forces['HarmonicBondForce']
_logger.info(f"\tthere are {reference_bond_force.getNumBonds()} bonds in reference force.")
for bond_index in range(reference_bond_force.getNumBonds()):
p1, p2, r0, K = reference_bond_force.getBondParameters(bond_index)
growth_idx = self._calculate_growth_idx([p1, p2], growth_indices)
_logger.debug(f"\t\tfor bond {bond_index} (i.e. partices {p1} and {p2}), the growth_index is {growth_idx}")
if growth_idx > 0:
if (p1, p2) not in omitted_bonds and (p2, p1) not in omitted_bonds:
modified_bond_force.addBond(p1, p2, [r0, K, growth_idx])
_logger.debug(f"\t\t\tadding to the growth system")
else:
_logger.debug(f"\t\t\tomitted bond")
self.omitted_growth_terms['bonds'].append((p1,p2))
atoms_with_positions_system.getForce(reference_forces_indices['HarmonicBondForce']).setBondParameters(bond_index,p1, p2, r0, K*0.0)
else:
_logger.debug(f"\t\t\tadding to the the atoms with positions system.")
# Create angle force
# NOTE: here, we are implementing an angle exclusion scheme for angle terms that are coupled to lnZ_phi
_logger.info("\tcreating angle force...")
modified_angle_force = openmm.CustomAngleForce(self._HarmonicAngleForceEnergy.format(global_parameter_name))
modified_angle_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['theta0', 'K', 'growth_idx']:
modified_angle_force.addPerAngleParameter(parameter_name)
growth_system.addForce(modified_angle_force)
reference_angle_force = reference_forces['HarmonicAngleForce']
neglected_angle_term_indices = [] #initialize the index list of neglected angle forces
_logger.info(f"\tthere are {reference_angle_force.getNumAngles()} angles in reference force.")
for angle in range(reference_angle_force.getNumAngles()):
p1, p2, p3, theta0, K = reference_angle_force.getAngleParameters(angle)
growth_idx = self._calculate_growth_idx([p1, p2, p3], growth_indices)
_logger.debug(f"\t\tfor angle {angle} (i.e. partices {p1}, {p2}, and {p3}), the growth_index is {growth_idx}")
if growth_idx > 0:
if neglect_angles and (not use_sterics):
if any( [p1, p2, p3] == torsion[:3] or [p3, p2, p1] == torsion[:3] for torsion in torsion_proposal_order):
#then there is a new atom in the angle term and the angle is part of a torsion and is necessary
_logger.debug(f"\t\t\tadding to the growth system since it is part of a torsion")
modified_angle_force.addAngle(p1, p2, p3, [theta0, K, growth_idx])
else:
#then it is a neglected angle force, so it must be tallied
_logger.debug(f"\t\t\ttallying to neglected term indices")
neglected_angle_term_indices.append(angle)
else:
possible_omissions = [(p1,p2), (p2, p3), (p2,p1), (p3,p2)]
if any(angle_pair in omitted_bonds for angle_pair in possible_omissions):
_logger.debug(f"\t\t\tomitted angle")
self.omitted_growth_terms['angles'].append((p1,p2,p3))
else:
_logger.debug(f"\t\t\tadding to the growth system")
modified_angle_force.addAngle(p1, p2, p3, [theta0, K, growth_idx])
atoms_with_positions_system.getForce(reference_forces_indices['HarmonicAngleForce']).setAngleParameters(angle, p1, p2, p3, theta0, K*0.0)
else:
#then it is an angle term of core atoms and should be added to the atoms_with_positions_angle_force
_logger.debug(f"\t\t\tadding to the the atoms with positions system.")
# Create torsion force
_logger.info("\tcreating torsion force...")
modified_torsion_force = openmm.CustomTorsionForce(self._PeriodicTorsionForceEnergy.format(global_parameter_name))
modified_torsion_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['periodicity', 'phase', 'k', 'growth_idx']:
modified_torsion_force.addPerTorsionParameter(parameter_name)
_logger.info(f"\tcreating extra torsions force...")
extra_modified_torsion_force = copy.deepcopy(modified_torsion_force) #we will add this if we _do_ call the extra modified torsions force
growth_system.addForce(modified_torsion_force) #but we add this, regardless
reference_torsion_force = reference_forces['PeriodicTorsionForce']
_logger.info(f"\tthere are {reference_torsion_force.getNumTorsions()} torsions in reference force.")
for torsion in range(reference_torsion_force.getNumTorsions()):
p1, p2, p3, p4, periodicity, phase, k = reference_torsion_force.getTorsionParameters(torsion)
growth_idx = self._calculate_growth_idx([p1, p2, p3, p4], growth_indices)
_logger.debug(f"\t\tfor torsion {torsion} (i.e. partices {p1}, {p2}, {p3}, and {p4}), the growth_index is {growth_idx}")
if growth_idx > 0:
possible_omissions = [(p1,p2), (p2,p3), (p3,p4), (p2,p1), (p3,p2), (p4,p3)]
if any(torsion_pair in omitted_bonds for torsion_pair in possible_omissions):
_logger.debug(f"\t\t\tomitted torsion")
self.omitted_growth_terms['torsions'].append((p1,p2,p3,p4))
else:
modified_torsion_force.addTorsion(p1, p2, p3, p4, [periodicity, phase, k, growth_idx])
_logger.debug(f"\t\t\tadding to the growth system")
atoms_with_positions_system.getForce(reference_forces_indices['PeriodicTorsionForce']).setTorsionParameters(torsion, p1, p2, p3, p4, periodicity, phase, k*0.0)
else:
_logger.debug(f"\t\t\tadding to the the atoms with positions system.")
# TODO: check this for bugs by turning on sterics
if (use_sterics or use_14_nonbondeds) and 'NonbondedForce' in reference_forces.keys():
_logger.info("\tcreating nonbonded force...")
# Copy parameters for local sterics parameters in nonbonded force
reference_nonbonded_force = reference_forces['NonbondedForce']
atoms_with_positions_system.getForce(reference_forces_indices['NonbondedForce']).setUseDispersionCorrection(False)
_logger.info("\t\tgrabbing reference nonbonded method, cutoff, switching function, switching distance...")
reference_nonbonded_force_method = reference_nonbonded_force.getNonbondedMethod()
_logger.debug(f"\t\t\tnonbonded method: {reference_nonbonded_force_method}")
reference_nonbonded_force_cutoff = reference_nonbonded_force.getCutoffDistance()
_logger.debug(f"\t\t\tnonbonded cutoff distance: {reference_nonbonded_force_cutoff}")
reference_nonbonded_force_switching_function = reference_nonbonded_force.getUseSwitchingFunction()
_logger.debug(f"\t\t\tnonbonded switching function (boolean): {reference_nonbonded_force_switching_function}")
reference_nonbonded_force_switching_distance = reference_nonbonded_force.getSwitchingDistance()
_logger.debug(f"\t\t\tnonbonded switching distance: {reference_nonbonded_force_switching_distance}")
#now we add the 1,4 interaction force
if reference_nonbonded_force.getNumExceptions() > 0:
_logger.info("\t\tcreating nonbonded exception force (i.e. custom bond for 1,4s)...")
custom_bond_force = openmm.CustomBondForce(self._nonbondedExceptionEnergy.format(global_parameter_name))
custom_bond_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['chargeprod', 'sigma', 'epsilon', 'growth_idx']:
custom_bond_force.addPerBondParameter(parameter_name)
growth_system.addForce(custom_bond_force)
#Now we iterate through the exceptions and add custom bond forces if the growth index for that bond > 0
_logger.info("\t\tlooping through exceptions calculating growth indices, and adding appropriate interactions to custom bond force.")
_logger.info(f"\t\tthere are {reference_nonbonded_force.getNumExceptions()} in the reference Nonbonded force")
possible_omissions = [[(p1,p2), (p2,p3), (p3,p4), (p2,p1), (p3,p2), (p4,p3)]]
for exception_index in range(reference_nonbonded_force.getNumExceptions()):
p1, p2, chargeprod, sigma, epsilon = reference_nonbonded_force.getExceptionParameters(exception_index)
growth_idx = self._calculate_growth_idx([p1, p2], growth_indices)
_logger.debug(f"\t\t\t{p1} and {p2} with charge {chargeprod} and epsilon {epsilon} have a growth index of {growth_idx}")
# Only need to add terms that are nonzero and involve newly added atoms.
if (growth_idx > 0) and ((chargeprod.value_in_unit_system(unit.md_unit_system) != 0.0) or (epsilon.value_in_unit_system(unit.md_unit_system) != 0.0)):
fails = 0
for tor in self.omitted_growth_terms['torsions']:
tor_set = set(tor)
if set((p1,p2)).issubset(tor_set):
fails += 1
if fails > 0:
self.omitted_growth_terms['1,4s'].append((p1,p2))
else:
custom_bond_force.addBond(p1, p2, [chargeprod, sigma, epsilon, growth_idx])
else:
_logger.info("\t\tthere are no Exceptions in the reference system.")
if use_sterics:
#now we define a custom nonbonded force for the growth system
_logger.info("\t\tadding custom nonbonded force...")
modified_sterics_force = openmm.CustomNonbondedForce(self._nonbondedEnergy.format(global_parameter_name))
modified_sterics_force.addGlobalParameter(global_parameter_name, default_growth_index)
for parameter_name in ['charge', 'sigma', 'epsilon', 'growth_idx']:
modified_sterics_force.addPerParticleParameter(parameter_name)
growth_system.addForce(modified_sterics_force)
# Translate nonbonded method to the custom nonbonded force
_logger.info("\t\tsetting nonbonded method, cutoff, switching function, and switching distance to custom nonbonded force...")
if reference_nonbonded_force_method in [0,1]: #if Nonbonded method is NoCutoff or CutoffNonPeriodic
modified_sterics_force.setNonbondedMethod(reference_nonbonded_force_method)
modified_sterics_force.setCutoffDistance(reference_nonbonded_force_cutoff)
elif reference_nonbonded_force_method in [2,3,4]:
modified_sterics_force.setNonbondedMethod(2)
modified_sterics_force.setCutoffDistance(self.sterics_cutoff_distance)
modified_sterics_force.setUseSwitchingFunction(reference_nonbonded_force_switching_function)
modified_sterics_force.setSwitchingDistance(reference_nonbonded_force_switching_distance)
else:
raise Exception(f"reference force nonbonded method {reference_nonbonded_force_method} is NOT supported for custom nonbonded force!")
# define atoms_with_positions_Nonbonded_Force
#atoms_with_positions_nonbonded_force.setUseDispersionCorrection(False)
# Add particle parameters to the custom nonbonded force...and add interactions to the atoms_with_positions_nonbonded_force if growth_index == 0
_logger.info("\t\tlooping through reference nonbonded force to add particle params to custom nonbonded force")
for particle_index in range(reference_nonbonded_force.getNumParticles()):
[charge, sigma, epsilon] = reference_nonbonded_force.getParticleParameters(particle_index)
growth_idx = self._calculate_growth_idx([particle_index], growth_indices)
modified_sterics_force.addParticle([charge, sigma, epsilon, growth_idx])
if particle_index in growth_indices:
atoms_with_positions_system.getForce(reference_forces_indices['NonbondedForce']).setParticleParameters(particle_index, charge*0.0, sigma, epsilon*0.0)
# Add exclusions, which are active at all times.
# (1,4) exceptions are always included, since they are part of the valence terms.
_logger.info("\t\tlooping through reference nonbonded force exceptions to add exclusions to custom nonbonded force")
for exception_index in range(reference_nonbonded_force.getNumExceptions()):
[p1, p2, chargeprod, sigma, epsilon] = reference_nonbonded_force.getExceptionParameters(exception_index)
modified_sterics_force.addExclusion(p1, p2)
#we also have to add the exceptions to the atoms_with_positions_nonbonded_force
#if len(set([p1, p2]).intersection(set(old_particle_indices))) == 2:
if len(set([p1,p2]).intersection(set(growth_indices))) > 0:
_logger.debug(f"\t\t\tparticle {p1} and/or {p2} are new indices and have an exception of {chargeprod} and {epsilon}. setting to zero.")
#at least one particle is newly added, so zero out this exception in the atoms_with_positions system
atoms_with_positions_system.getForce(reference_forces_indices['NonbondedForce']).setExceptionParameters(exception_index, p1, p2, chargeprod * 0.0, sigma, epsilon * 0.0)
# Only compute interactions of new particles with all other particles
# TODO: Allow interactions to be restricted to only the residue being grown.
modified_sterics_force.addInteractionGroup(set(new_particle_indices), set(old_particle_indices))
modified_sterics_force.addInteractionGroup(set(new_particle_indices), set(new_particle_indices))
if reference_nonbonded_force_method in [0,1]:
if 'MonteCarloBarostat' in reference_forces_indices.keys():
atoms_with_positions_system.removeForce(reference_forces_indices['MonteCarloBarostat'])
else:
if 'MonteCarloBarostat' in reference_forces_indices.keys():
atoms_with_positions_system.removeForce(reference_forces_indices['MonteCarloBarostat'])
if 'NonbondedForce' in reference_forces_indices.keys(): #if we aren't using 14 interactions, we simply delete the nonbonded force object
atoms_with_positions_system.removeForce(reference_forces_indices['NonbondedForce'])
elif 'NonbondedForce' in reference_forces.keys():
if 'MonteCarloBarostat' in reference_forces_indices.keys():
atoms_with_positions_system.removeForce(reference_forces_indices['MonteCarloBarostat'])
if 'NonbondedForce' in reference_forces_indices.keys(): #if we aren't using 14 interactions, we simply delete the nonbonded force object
atoms_with_positions_system.removeForce(reference_forces_indices['NonbondedForce'])
# Add extra ring-closing torsions, if requested.
if add_extra_torsions:
_logger.debug(f"\t\tattempting to add extra torsions...")
if reference_topology is None:
raise ValueError("Need to specify topology in order to add extra torsions.")
self._determine_extra_torsions(extra_modified_torsion_force, reference_topology, growth_indices)
if extra_modified_torsion_force.getNumTorsions() > 0:
#then we should add it to the growth system...
growth_system.addForce(extra_modified_torsion_force)
# if add_extra_angles:
# if reference_topology==None:
# raise ValueError("Need to specify topology in order to add extra angles")
# self._determine_extra_angles(modified_angle_force, reference_topology, growth_indices)
# Store growth system
self._growth_parameter_name = global_parameter_name
self._growth_system = growth_system
self._atoms_with_positions_system = atoms_with_positions_system #note this is only bond, angle, and torsion forces
self.neglected_angle_terms = neglected_angle_term_indices #these are angle terms that are neglected because of coupling to lnZ_phi
_logger.info("Neglected angle terms : {}".format(neglected_angle_term_indices))
_logger.info(f"omitted_growth_terms: {self.omitted_growth_terms}")
_logger.info(f"extra torsions: {self.extra_torsion_terms}")
def set_growth_parameter_index(self, growth_parameter_index, context=None):
"""
Set the growth parameter index
"""
# TODO: Set default force global parameters if context is not None.
if context is not None:
context.setParameter(self._growth_parameter_name, growth_parameter_index)
self.current_growth_index = growth_parameter_index
def get_modified_system(self):
"""
Create a modified system with parameter_name parameter. When 0, only core atoms are interacting;
for each integer above 0, an additional atom is made interacting, with order determined by growth_index.
Returns
-------
growth_system : simtk.openmm.System object
System with the appropriate modifications, with growth parameter set to maximum.
"""
return self._growth_system
def _determine_extra_torsions(self,
torsion_force,
reference_topology,
growth_indices):
"""
In order to facilitate ring closure and ensure proper bond stereochemistry,
we add additional biasing torsions to rings and stereobonds that are then corrected
for in the acceptance probability.
Determine which residue is covered by the new atoms
Identify rotatable bonds
Construct analogous residue in OpenEye and generate configurations with Omega
Measure appropriate torsions and generate relevant parameters
.. warning :: Only one residue should be changing
.. warning :: This currently will not work for polymer residues
.. todo :: Use a database of biasing torsions constructed ahead of time and match to residues by NetworkX
Parameters
----------
torsion_force : openmm.CustomTorsionForce object
the new/old torsion force if forward/backward
reference_topology : openmm.app.Topology object (augmented)
the new/old topology if forward/backward
oemol : openeye.oechem.OEMol
An OEMol representing the new (old) system if forward (backward)
growth_indices : list of int
The list of new atoms and the order in which they will be added.
Returns
-------
torsion_force : openmm.CustomTorsionForce
The torsion force with extra torsions added appropriately.
"""
from perses.rjmc import coordinate_numba
from openeye import oechem
# Do nothing if there are no atoms to grow.
if len(growth_indices) == 0:
return torsion_force
#set ring restraints
_logger.debug(f"\t\t\tattempting to add ring restraints")
#get the list of torsions in the molecule that are not about a rotatable bond
# Note that only torsions involving heavy atoms are enumerated here.
rotor = oechem.OEIsRotor()
torsion_predicate = oechem.OENotBond(rotor)
non_rotor_torsions = list(oechem.OEGetTorsions(reference_topology.residue_oemol, torsion_predicate))
#relevant_torsion_list = self._select_torsions_without_h(non_rotor_torsions)
relevant_torsion_list = non_rotor_torsions
#now, for each torsion, extract the set of indices and the angle
periodicity = 1
k = 1200.0*unit.kilocalories_per_mole # stddev of 1.2 degrees
#print([atom.name for atom in growth_indices])
_logger.debug(f"\t\t\trelevant torsions for ring restraints being added...")
for torsion in relevant_torsion_list:
#make sure to get the atom index that corresponds to the topology
#atom_indices = [torsion.a.GetData("topology_index"), torsion.b.GetData("topology_index"), torsion.c.GetData("topology_index"), torsion.d.GetData("topology_index")]
oe_atom_indices = [torsion.a.GetIdx(),
torsion.b.GetIdx(),
torsion.c.GetIdx(),
torsion.d.GetIdx()]
if all(_idx in list(reference_topology.reverse_residue_to_oemol_map.keys()) for _idx in oe_atom_indices):
#then every atom in the oemol lives in the openmm topology/residue, so we can consider it
topology_index_map = [reference_topology.reverse_residue_to_oemol_map[q] for q in oe_atom_indices]
else:
topology_index_map = None
# Determine phase in [-pi,+pi) interval
#phase = (np.pi)*units.radians+angle
adjusted_phase = self.adjust_phase(phase = torsion.radians)
#print('PHASE>>>> ' + str(phase)) # DEBUG
if topology_index_map is not None:
growth_idx = self._calculate_growth_idx(topology_index_map, growth_indices)
atom_names = [torsion.a.GetName(), torsion.b.GetName(), torsion.c.GetName(), torsion.d.GetName()]
#print("Adding torsion with atoms %s and growth index %d" %(str(atom_names), growth_idx))
#If this is a CustomTorsionForce, we need to pass the parameters as a list, and it will have the growth_idx parameter.
#If it's a regular PeriodicTorsionForce, there is no growth_index and the parameters are passed separately.
p1, p2, p3, p4 = topology_index_map
possible_omissions = [(p1,p2), (p2,p3), (p3,p4), (p2,p1), (p3,p2), (p4,p3)]
if growth_idx > 0:
if any(torsion_pair in self.omitted_bonds for torsion_pair in possible_omissions):
pass
else:
_torsion_index = torsion_force.addTorsion(topology_index_map[0], topology_index_map[1], topology_index_map[2], topology_index_map[3], [periodicity, adjusted_phase, k, growth_idx])
self.extra_torsion_terms[_torsion_index] = (topology_index_map[0], topology_index_map[1], topology_index_map[2], topology_index_map[3], [periodicity, adjusted_phase, k, growth_idx])
_logger.debug(f"\t\t\t\t{(topology_index_map[0], topology_index_map[1], topology_index_map[2], topology_index_map[3])}")
else:
pass
#we omit extra torsion terms whose growth index is zero (i.e. all torsion atoms are core atoms with positions)
_logger.debug(f"\t\t\trelevant torsions for chirality restraints being added...")
#set chirality restraints (adapted from https://github.com/choderalab/perses/blob/protein_mutations_ivy/perses/rjmc/geometry.py)
#set stereochemistry
#the chirality of the atoms is supposed to be pre-specified by NetworkXMolecule
#render a 3d structure: note that this interferes with the rjmc proposal (since we cannot enumerate the number of possible conformers)
#add the improper torsions associated with the chiral center
coords = reference_topology.residue_oemol.GetCoords()
networkx_graph = reference_topology._get_networkx_molecule()
#CIP_perceptions = {0: 'R', 1: 'S'}
#iterate over all of the atoms with chiral centers
_logger.debug(f"\t\t\t\tnodes: {networkx_graph.nodes()}")
for _node in networkx_graph.nodes(data = True):
_logger.debug(f"\t\t\t\tquerying node {_node[0]}")
_logger.debug(f"\t\t\t\tnode attributes: {_node[1]}")
if _node[1]['oechem_atom'].IsChiral():
_logger.debug(f"\t\t\t\tnode is chiral...")
assert(_node[1]['oechem_atom']).HasStereoSpecified(), f"atom {_node[1]['oechem_atom']} is chiral, but the chirality is not specified."
_stereo = stereo = oechem.OEPerceiveCIPStereo(reference_topology.residue_oemol, _node[1]['oechem_atom'])
#_logger.debug(f"\t\t\t\t\tis chiral with CIP: {CIP_perceptions[_stereo]}")
#get the neighbors
#nbrs_top : list(int) of topology indices
#nbrs_oemol : list(int) of oemol indices of neighbor
#nbrs : list(OEAtomBase) of the oemol atoms of neighbors
#get the neighbors of the chiral atom of interest
nbrs_top, nbrs_oemol, nbrs = [], [], []
for nbr in networkx_graph[_node[0]]:
nbrs_top.append(nbr)
nbrs_oemol.append(reference_topology.residue_to_oemol_map[nbr])
nbrs.append(networkx_graph.nodes[nbr]['oechem_atom'])
_logger.debug(f"\t\t\t\t\tquerying neighbors: {nbrs_top} with data: {[networkx_graph.nodes[lst_nbr]['openmm_atom'] for lst_nbr in nbrs_top]}")
growth_idx = self._calculate_growth_idx(nbrs_top, growth_indices)
_logger.debug(f"\t\t\t\t\tthe growth index of the neighbors is {growth_idx}")
if growth_idx > 0:
if len(list(networkx_graph[_node[0]])) == 4:
_logger.debug(f"\t\t\t\t\tthe number of neighbors is 4; proceeding")
# TODO: handle chiral centers where the valency of the chiral center > 4
#specify the atom order for calculating the angle
#the order of the improper torsion will be as follows (p1, p2, p3, p4):
#p1: the neighbor of the chiral atom whose growth index is minimally greater than the growth index of the chiral center
#p2: the chiral center
#p3: the neighbor of the chiral center whose growth index is maximally less than (or equal to) the growth index of the chiral center
#p4: the neighbor of the chiral atom whose growth index is minimally greater than the growth index of p1
_node_growth_index = self._calculate_growth_idx([_node[0]], growth_indices)
nbr_growth_indices = [self._calculate_growth_idx([q], growth_indices) for q in nbrs_top]
_nbr_to_growth_index_tuple = [(_nbr, _idx) for _nbr, _idx in zip(nbrs_top, nbr_growth_indices)]
_logger.debug(f"\t\t\t\t\tgrowth index of node: {_node_growth_index}")
_logger.debug(f"\t\t\t\t\tgrowth indices of neighbors: {_nbr_to_growth_index_tuple}")
if [tup[1] for tup in _nbr_to_growth_index_tuple].count(0) == 3:
_logger.warning(f"\t\t\t\t\tchiral atom {_node[1]['openmm_atom']} with neighbors {[networkx_graph.nodes[lst_nbr]['openmm_atom'] for lst_nbr in nbrs_top]} is surrounded by 3 core neighbors. omitting chirality bias torsion")
else:
#find p1:
p1_target_growth_index = min(tup[1] for tup in _nbr_to_growth_index_tuple if tup[1] > _node_growth_index)
p1 = [q[0] for q in _nbr_to_growth_index_tuple if q[1] == p1_target_growth_index][0] #take the first hit
#find p2:
p2 = _node[0]
#find p3:
p3_target_growth_index = max(tup[1] for tup in _nbr_to_growth_index_tuple if tup[1] <= _node_growth_index)
p3 = [q[0] for q in _nbr_to_growth_index_tuple if q[1] == p3_target_growth_index][0] #take the first hit
#find p4:
p4_target_growth_index = min(tup[1] for tup in _nbr_to_growth_index_tuple if tup[1] > p1_target_growth_index)
p4 = [q[0] for q in _nbr_to_growth_index_tuple if q[1] == p4_target_growth_index][0] #take the first hit
_logger.debug(f"\t\t\t\t\tgrowth index carrying this improper: {p4_target_growth_index}")
#now convert p1-p4 to oemol indices
oemol_indices = [reference_topology.residue_to_oemol_map[q] for q in [p1, p2, p3, p4]]
#calculate the improper torsion
# coords is dict of {idx: (x_0, y_0, z_0)}
phase = coordinate_numba.cartesian_to_internal(np.array(coords[oemol_indices[0]], dtype = 'float64'),
np.array(coords[oemol_indices[1]], dtype = 'float64'),
np.array(coords[oemol_indices[2]], dtype = 'float64'),
np.array(coords[oemol_indices[3]], dtype = 'float64'))[2]
adjusted_phase = self.adjust_phase(phase = phase)
growth_idx = self._calculate_growth_idx(nbrs_top, growth_indices)
_torsion_index = torsion_force.addTorsion(p1, p2, p3, p4, [periodicity, adjusted_phase, k, p4_target_growth_index])
self.extra_torsion_terms[_torsion_index] = (p1, p2, p3, p4, [periodicity, adjusted_phase, k, p4_target_growth_index])
_logger.debug(f"\t\t\t\t\t{(p1, p2, p3, p4)}, phase : {adjusted_phase}")
else:
#the atom of interest must have 4 substituents to be chiral; TODO: chirality can also be maintained with >4 atoms.
pass
return torsion_force
def adjust_phase(self, phase):
"""
Utility function to adjust the phase properly
Parameters
----------
phase : float
phase angle
Returns
-------
adjusted_phase : float * unit.radians
adjusted phase with convention
"""
phase = phase + np.pi # TODO: Check that this is the correct convention?
while (phase >= np.pi):
phase -= 2*np.pi
while (phase < -np.pi):
phase += 2*np.pi
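# Example (illustrative): an input phase of 3*pi/2 becomes 5*pi/2 after adding pi, and the while loops
# wrap it back into [-pi, +pi), giving pi/2.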
phase *= unit.radian
adjusted_phase = phase
return adjusted_phase
def _select_torsions_without_h(self, torsion_list):
"""
Return only torsions that do not contain hydrogen
Parameters
----------
torsion_list : list of oechem.OETorsion
Returns
-------
heavy_torsions : list of oechem.OETorsion
"""
heavy_torsions = []
for torsion in torsion_list:
is_h_present = [torsion.a.IsHydrogen(), torsion.b.IsHydrogen(), torsion.c.IsHydrogen(), torsion.d.IsHydrogen()]
if all(entry == False for entry in is_h_present):
heavy_torsions.append(torsion)
else:
#there is a hydrogen in this torsion, so it is omitted
pass
return heavy_torsions
def _determine_extra_angles(self, angle_force, reference_topology, growth_indices):
"""
Determine extra angles to be placed on aromatic ring members. Sometimes,
the native angle force is too weak to efficiently close the ring. As with the
torsion force, this method assumes that only one residue is changing at a time.
Parameters
----------
angle_force : simtk.openmm.CustomAngleForce
the force to which additional terms will be added
reference_topology : simtk.openmm.app.Topology
new/old topology if forward/backward
growth_indices : list of int
atom growth indices
Returns
-------
angle_force : simtk.openmm.CustomAngleForce
The modified angle force
"""
from simtk import openmm
import itertools
from openeye import oechem, oeomega
if len(growth_indices)==0:
return
angle_force_constant = 400.0*unit.kilojoules_per_mole/unit.radians**2
atoms = list(reference_topology.atoms())
growth_indices = list(growth_indices)
#get residue from first atom
residue = atoms[growth_indices[0].idx].residue
try:
oemol = FFAllAngleGeometryEngine._oemol_from_residue(residue)
except Exception as e:
print("Could not generate an oemol from the residue.")
print(e)
#get the omega geometry of the molecule:
omega = oeomega.OEOmega()
omega.SetMaxConfs(1)
omega.SetStrictStereo(False) #TODO: fix stereochem
omega(oemol)
#we now have the residue as an oemol. Time to find the relevant angles.
#There's no equivalent to OEGetTorsions, so first find atoms that are relevant
#TODO: find out if that's really true
aromatic_pred = oechem.OEIsAromaticAtom()
heavy_pred = oechem.OEIsHeavy()
angle_criteria = oechem.OEAndAtom(aromatic_pred, heavy_pred)
#get all heavy aromatic atoms:
#TODO: do this more efficiently
heavy_aromatics = list(oemol.GetAtoms(angle_criteria))
for atom in heavy_aromatics:
#bonded_atoms = [bonded_atom for bonded_atom in list(atom.GetAtoms()) if bonded_atom in heavy_aromatics]
bonded_atoms = list(atom.GetAtoms())
for angle_atoms in itertools.combinations(bonded_atoms, 2):
angle = oechem.OEGetAngle(oemol, angle_atoms[0], atom, angle_atoms[1])
atom_indices = [angle_atoms[0].GetData("topology_index"), atom.GetData("topology_index"), angle_atoms[1].GetData("topology_index")]
angle_radians = angle*unit.radian
growth_idx = self._calculate_growth_idx(atom_indices, growth_indices)
#If this is a CustomAngleForce, we need to pass the parameters as a list, and it will have the growth_idx parameter.
#If it's a regular HarmonicAngleForce, there is no growth_index and the parameters are passed separately.
if isinstance(angle_force, openmm.CustomAngleForce):
angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], [angle_radians, angle_force_constant, growth_idx])
elif isinstance(angle_force, openmm.HarmonicAngleForce):
angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], angle_radians, angle_force_constant)
else:
raise ValueError("Angle force must be either CustomAngleForce or HarmonicAngleForce")
return angle_force
def _calculate_growth_idx(self, particle_indices, growth_indices):
"""
Utility function to calculate the growth index of a particular force.
For each particle index, it will check to see if it is in growth_indices.
If not, 0 is added to an array, if yes, the index in growth_indices is added.
Finally, the method returns the max of the accumulated array
Parameters
----------
particle_indices : list of int
The indices of particles involved in this force
growth_indices : list of int
The ordered list of indices for atom position proposals
Returns
-------
growth_idx : int
The growth_idx parameter
"""
particle_indices_set = set(particle_indices)
growth_indices_set = set(growth_indices)
new_atoms_in_force = particle_indices_set.intersection(growth_indices_set)
if len(new_atoms_in_force) == 0:
return 0
new_atom_growth_order = [growth_indices.index(atom_idx)+1 for atom_idx in new_atoms_in_force]
return max(new_atom_growth_order)
class NetworkXProposalOrder(object):
"""
This is a proposal order generating object that uses just networkx and graph traversal for simplicity.
"""
def __init__(self, topology_proposal, direction="forward"):
"""
Create a NetworkXProposalOrder class
Parameters
----------
topology_proposal : perses.rjmc.topology_proposal.TopologyProposal
Container class for the transformation
direction: str, default forward
Whether to go forward or in reverse for the proposal.
TODO : reorganize this
"""
from simtk.openmm import app
self._topology_proposal = topology_proposal
self._direction = direction
self._hydrogen = app.Element.getByAtomicNumber(1.0)
# Set the direction
if direction == "forward":
self._destination_system = self._topology_proposal.new_system
self._new_atoms = self._topology_proposal.unique_new_atoms
self._destination_topology = self._topology_proposal.new_topology
self._atoms_with_positions = self._topology_proposal.new_to_old_atom_map.keys()
_nx_graph = self._topology_proposal._new_topology._get_networkx_molecule()
elif direction == "reverse":
self._destination_system = self._topology_proposal.old_system
self._new_atoms = self._topology_proposal.unique_old_atoms
self._destination_topology = self._topology_proposal.old_topology
self._atoms_with_positions = self._topology_proposal.old_to_new_atom_map.keys()
_nx_graph = self._topology_proposal._old_topology._get_networkx_molecule()
else:
raise ValueError("Direction must be either forward or reverse.")
self._new_atom_objects = list(self._destination_topology.atoms())
self._new_atoms_to_place = [atom for atom in self._destination_topology.atoms() if atom.index in self._new_atoms]
self._atoms_with_positions_set = set(self._atoms_with_positions)
self._hydrogens = []
self._heavy = []
# Sort the new atoms into hydrogen and heavy atoms:
for atom in self._new_atoms_to_place:
if atom.element == self._hydrogen:
self._hydrogens.append(atom.index)
else:
self._heavy.append(atom.index)
# Sanity check
if len(self._hydrogens)==0 and len(self._heavy)==0:
msg = 'NetworkXProposalOrder: No new atoms for direction {}\n'.format(direction)
msg += str(topology_proposal)
raise Exception(msg)
# Choose the first of the new atoms to find the corresponding residue:
#transforming_residue = self._new_atom_objects[self._new_atoms[0]].residue
self._residue_graph = _nx_graph
self._reference_connectivity_graph = self._create_reference_connectivity_graph()
def _create_reference_connectivity_graph(self):
"""
utility method to create a reference connectivity graph to check for omitted valence terms (the primary use of this graph is to check for ring closures)
"""
#take the self._residue_graph and create a replicate (without the misc attributes) with the atoms_with_positions
_reference_connectivity_graph = nx.Graph()
atoms_with_positions = set(self._atoms_with_positions)
#iterate over all the bonds
for bond in self._residue_graph.edges():
if set(bond).issubset(atoms_with_positions):
#if both of the atoms in the bond are in atoms_with_positions, we can add the atoms/bonds to the reference
_reference_connectivity_graph.add_edge(*bond)
return _reference_connectivity_graph
def determine_proposal_order(self):
"""
Determine the proposal order of this system pair.
This includes the choice of a torsion. As such, a logp is returned.
Parameters
----------
direction : str, optional
whether to determine the forward or reverse proposal order
Returns
-------
atom_torsions : list of list of int
A list of torsions, where the first atom in the torsion is the one being proposed
logp_torsion_choice : list
log probability of the chosen torsions as a list of sequential atom placements
omitted_bonds : list of tuples
list of tuples of atom_indices
#this is used when creating the growth system generator and the atoms_with_positions_system to account for unconnected atoms
"""
heavy_atoms_torsions, heavy_logp = self._propose_atoms_in_order(self._heavy)
hydrogen_atoms_torsions, hydrogen_logp = self._propose_atoms_in_order(self._hydrogens)
proposal_order = heavy_atoms_torsions + hydrogen_atoms_torsions
if len(proposal_order) == 0:
msg = 'NetworkXProposalOrder: proposal_order is empty\n'
raise Exception(msg)
#Check that no atom is placed until each atom in the corresponding torsion is in the set of atoms with positions
_set_of_atoms_with_positions = set(self._atoms_with_positions)
# Now iterate through the proposal_order, ensuring that each atom in the corresponding torsion list is in the _set_of_atoms_with_positions (appending to the set after each placement)
for torsion in proposal_order:
assert set(torsion[1:]).issubset(_set_of_atoms_with_positions), "Proposal Order Issue: a torsion atom is not position-defined"
_set_of_atoms_with_positions.add(torsion[0])
# Ensure lists are not ill-defined
assert heavy_logp + hydrogen_logp != [], "logp list of log_probabilities from torsion choices is an empty list"
assert len(heavy_logp + hydrogen_logp) == len(proposal_order), "There is a mismatch in the size of the atom torsion proposals and the associated logps"
#create a list of omitted_bonds tuples
omitted_bonds = []
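# Any edge present in the residue graph but absent from the reference connectivity graph was never
# traversed while placing atoms (typically a ring-closing bond); its valence terms are flagged as
# omitted so the growth system can exclude them consistently.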
omitted_bonds_forward_pass = [edge for edge in self._residue_graph.edges() if edge not in list(self._reference_connectivity_graph.edges())]
for omitted_bond in omitted_bonds_forward_pass:
if omitted_bond[::-1] not in list(self._reference_connectivity_graph.edges()):
omitted_bonds.append(omitted_bond)
#delete the residue graph and reference connectivity graph since they cannot be pickled...
del self._residue_graph
del self._reference_connectivity_graph
return proposal_order, heavy_logp + hydrogen_logp, omitted_bonds
def _propose_atoms_in_order(self, atom_group):
"""
Propose a group of atoms along with corresponding torsions and a total log probability for the choice
Parameters
----------
atom_group : list of int
The atoms to propose
Returns
-------
atom_torsions : list of list of int
            A list of torsions, where the first atom of each torsion is the one being proposed
logp : list
The contribution to the overall proposal log probability as a list of sequential logps
"""
        atom_torsions = []
logp = []
assert len(atom_group) == len(set(atom_group)), "There are duplicate atom indices in the list of atom proposal indices"
while len(atom_group) > 0:
#initialise an eligible_torsions_list
eligible_torsions_list = list()
for atom_index in atom_group:
# Find the shortest path up to length four from the atom in question:
shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)
# Loop through the destination and path of each path and append to eligible_torsions_list
# if destination has a position and path[1:3] is a subset of atoms with positions
for destination, path in shortest_paths.items():
# Check if the path is length 4 (a torsion) and that the destination has a position. Continue if not.
if len(path) != 4 or destination not in self._atoms_with_positions_set:
continue
# If the last atom is in atoms with positions, check to see if the others are also.
# If they are, append the torsion to the list of possible torsions to propose
if set(path[1:3]).issubset(self._atoms_with_positions_set):
eligible_torsions_list.append(path)
assert len(eligible_torsions_list) != 0, "There is a connectivity issue; there are no torsions from which to choose"
#now we have to randomly choose a single torsion
ntorsions = len(eligible_torsions_list)
random_torsion_index = np.random.choice(range(ntorsions))
random_torsion = eligible_torsions_list[random_torsion_index]
#append random torsion to the atom_torsions and remove source atom from the atom_group
chosen_atom_index = random_torsion[0]
first_old_atom_index = random_torsion[1]
atom_torsions.append(random_torsion)
atom_group.remove(chosen_atom_index)
#add atom to atoms with positions and corresponding set
self._atoms_with_positions_set.add(chosen_atom_index)
#add a bond from the new to the previous torsion atom in the _reference_connectivity_graph
self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)
#add the log probability of the choice to logp
logp.append(np.log(1./ntorsions))
# Ensure that logp is not ill-defined
assert len(logp) == len(atom_torsions), "There is a mismatch in the size of the atom torsion proposals and the associated logps"
return atom_torsions, logp
class NoTorsionError(Exception):
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(NoTorsionError, self).__init__(message)
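# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module).
# It mirrors the torsion search used in _propose_atoms_in_order above: the
# candidate torsions for a new atom are the 4-atom shortest paths rooted at
# that atom whose other three atoms already have positions, and one candidate
# is drawn uniformly at random, contributing log(1/n_candidates) to the
# proposal log probability.  The toy graph and atom indices are hypothetical.
# ---------------------------------------------------------------------------
def _example_torsion_choice():
    import networkx as nx
    import numpy as np
    graph = nx.Graph()
    graph.add_edges_from([(0, 1), (1, 2), (2, 3), (2, 4)])  # small branched toy "molecule"
    atoms_with_positions = {1, 2, 3, 4}
    new_atom = 0  # the atom whose position is being proposed
    shortest_paths = nx.single_source_shortest_path(graph, new_atom, cutoff=4)
    eligible = [path for destination, path in shortest_paths.items()
                if len(path) == 4
                and destination in atoms_with_positions
                and set(path[1:3]).issubset(atoms_with_positions)]
    chosen = eligible[np.random.choice(len(eligible))]  # uniform choice among candidates
    logp_choice = np.log(1.0 / len(eligible))
    return chosen, logp_choice
if __name__ == "__main__":
    print(_example_torsion_choice())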
| choderalab/perses | perses/rjmc/geometry.py | Python | mit | 141,869 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.datasets.dataset import Dataset as UrbansimDataset
from opus_core.logger import logger
class NodeTravelDataDataset(UrbansimDataset):
id_name_default = ["from_node_id", "to_node_id"]
in_table_name_default = "node_travel_data"
out_table_name_default = "node_travel_data"
dataset_name = "node_travel_data"
def __init__(self, id_values=None, **kwargs):
UrbansimDataset.__init__(self, **kwargs)
def _get_attribute_sum_from_path(self, name, path):
"""Returns a sum of values of the given attribute along the given path."""
result = 0
for step in range(len(path)-1):
try:
#print " (%s) %s (%s)" % (path[step], self.get_attribute_by_id(name, [[path[step], path[step+1]]]), path[step+1])
result = result + self.get_attribute_by_id(name, [[path[step], path[step+1]]])
            except Exception:
logger.log_warning("Path from %s to %s not found." % (path[step], path[step+1]))
        return result
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim/datasets/node_travel_data_dataset.py | Python | gpl-2.0 | 1,176
#!/usr/bin/env python
'''
Copyright (c) 2016 anti-XSS developers
'''
class Links(object):
'''
Links class used as a global var.
'''
content = []
def __init__(self):
pass
def addText(self, text):
self.content.append(text)
def setContent(self, content):
self.content = content
def getContent(self):
return self.content
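# Illustrative usage (added for clarity; not part of the original module).
# Because ``content`` is a class attribute, every Links() instance that only
# calls addText() appends to the same shared list (which is what lets the
# class act "as a global var"), while setContent() rebinds a per-instance
# attribute of its own.  The URL below is hypothetical.
if __name__ == "__main__":
    a = Links()
    b = Links()
    a.addText('http://example.com/page')
    print(b.getContent())  # prints ['http://example.com/page'] (shared state)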
| lewangbtcc/anti-XSS | lib/var/links.py | Python | mit | 388 |
# -*- coding: utf-8 -*-
#
# scanpdf documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 23 13:43:29 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Scan PDF'
copyright = u'2014, Virantha N. Ekanayake'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
try:
release = pkg_resources.get_distribution('scanpdf').version
except pkg_resources.DistributionNotFound:
print 'To build the documentation, The distribution information of scanpdf'
print 'Has to be available. Either install the package into your'
print 'development environment or run "setup.py develop" to setup the'
print 'metadata. A virtualenv is recommended!'
sys.exit(1)
del pkg_resources
version = '.'.join(release.split('.')[:2])
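# Note (added for clarity; not in the original file): the try block above reads
# the full version string of the installed 'scanpdf' distribution for |release|,
# and the line above keeps only the "major.minor" part for |version|.  On
# Python 3.8+ roughly the same lookup could be written as:
#   import importlib.metadata
#   release = importlib.metadata.version('scanpdf')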
# The full version, including alpha/beta/rc tags.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'scanpdfdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'scanpdf.tex', u'Scan PDF Documentation',
u'Virantha N. Ekanayake', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scanpdf', u'Scan PDF Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'scanpdf', u'Scan PDF Documentation',
u'Author', 'scanpdf', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'scanpdf'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'scanpdf'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| virantha/scanpdf | docs/conf.py | Python | apache-2.0 | 10,696
# Bring in all of the public TensorFlow interface into this
# module.
# pylint: disable=wildcard-import
from tensorflow.python import *
| liyu1990/tensorflow | tensorflow/__init__.py | Python | apache-2.0 | 136 |
# maintained by triplefox
# Copyright (c) James Hofmann 2012.
# This file is part of pyspades.
# pyspades is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyspades is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyspades. If not, see <http://www.gnu.org/licenses/>.
from twisted.internet.reactor import seconds
from scheduler import Scheduler
from commands import name, add, get_player, join_arguments, InvalidPlayer
REQUIRE_REASON = True
S_NO_VOTEKICK = 'No votekick in progress'
S_DEFAULT_REASON = 'NO REASON GIVEN'
S_IN_PROGRESS = 'Votekick already in progress'
S_SELF_VOTEKICK = "You can't votekick yourself"
S_NOT_ENOUGH_PLAYERS = "There aren't enough players to vote"
S_VOTEKICK_IMMUNE = "You can't votekick this player"
S_NOT_YET = "You can't start another votekick yet!"
S_NEED_REASON = 'You must provide a reason for the votekick'
S_CANT_CANCEL = "You didn't start the votekick!"
S_YES = '{player} voted YES'
S_ENDED = 'Votekick for {victim} has ended. {result}'
S_RESULT_TIMED_OUT = 'Votekick timed out'
S_RESULT_CANCELLED = 'Cancelled'
S_RESULT_BANNED = 'Banned by admin'
S_RESULT_KICKED = 'Kicked by admin'
S_RESULT_INSTIGATOR_KICKED = 'Instigator kicked by admin'
S_RESULT_LEFT = '{victim} left during votekick'
S_RESULT_INSTIGATOR_LEFT = 'Instigator {instigator} left'
S_RESULT_PASSED = 'Player kicked'
S_ANNOUNCE_IRC = '* {instigator} started a votekick against player {victim}. ' \
'Reason: {reason}'
S_ANNOUNCE = '{instigator} started a VOTEKICK against {victim}. Say /Y to agree'
S_ANNOUNCE_SELF = 'You started a votekick against {victim}. Say /CANCEL to ' \
'stop it'
S_UPDATE = '{instigator} is votekicking {victim}. /Y to vote ({needed} left)'
S_REASON = 'Reason: {reason}'
class VotekickFailure(Exception):
pass
@name('votekick')
def start_votekick(connection, *args):
protocol = connection.protocol
if connection not in protocol.players:
raise KeyError()
player = connection
if not args:
if protocol.votekick:
# player requested votekick info
protocol.votekick.send_chat_update(player)
return
raise ValueError()
value = args[0]
try:
# vanilla aos behavior
victim = get_player(protocol, '#' + value)
except InvalidPlayer:
victim = get_player(protocol, value)
reason = join_arguments(args[1:])
try:
# attempt to start votekick
votekick = Votekick.start(player, victim, reason)
protocol.votekick = votekick
except VotekickFailure as err:
return str(err)
@name('cancel')
def cancel_votekick(connection):
protocol = connection.protocol
votekick = protocol.votekick
if not votekick:
return S_NO_VOTEKICK
if connection in protocol.players:
player = connection
if (player is not votekick.instigator and not player.admin and
not player.rights.cancel):
return S_CANT_CANCEL
votekick.end(S_RESULT_CANCELLED)
@name('y')
def vote_yes(connection):
protocol = connection.protocol
if connection not in protocol.players:
raise KeyError()
player = connection
votekick = protocol.votekick
if not votekick:
return S_NO_VOTEKICK
votekick.vote(player)
add(start_votekick)
add(cancel_votekick)
add(vote_yes)
class Votekick(object):
duration = 120.0 # 2 minutes
    interval = 2 * 60.0 # 2 minutes
ban_duration = 15.0
public_votes = True
schedule = None
def _get_votes_remaining(self):
return self.protocol.get_required_votes() - len(self.votes) + 1
votes_remaining = property(_get_votes_remaining)
@classmethod
def start(cls, instigator, victim, reason = None):
protocol = instigator.protocol
last_votekick = instigator.last_votekick
reason = reason.strip() if reason else None
if protocol.votekick:
raise VotekickFailure(S_IN_PROGRESS)
elif instigator is victim:
raise VotekickFailure(S_SELF_VOTEKICK)
elif protocol.get_required_votes() <= 0:
raise VotekickFailure(S_NOT_ENOUGH_PLAYERS)
elif victim.admin or victim.rights.cancel:
raise VotekickFailure(S_VOTEKICK_IMMUNE)
elif not instigator.admin and (last_votekick is not None and
seconds() - last_votekick < cls.interval):
raise VotekickFailure(S_NOT_YET)
elif REQUIRE_REASON and not reason:
raise VotekickFailure(S_NEED_REASON)
result = protocol.on_votekick_start(instigator, victim, reason)
if result is not None:
raise VotekickFailure(result)
reason = reason or S_DEFAULT_REASON
return cls(instigator, victim, reason)
def __init__(self, instigator, victim, reason):
self.protocol = protocol = instigator.protocol
self.instigator = instigator
self.victim = victim
self.reason = reason
self.votes = {instigator : True}
self.ended = False
protocol.irc_say(S_ANNOUNCE_IRC.format(instigator = instigator.name,
victim = victim.name, reason = self.reason))
protocol.send_chat(S_ANNOUNCE.format(instigator = instigator.name,
victim = victim.name), sender = instigator)
protocol.send_chat(S_REASON.format(reason = self.reason),
sender = instigator)
instigator.send_chat(S_ANNOUNCE_SELF.format(victim = victim.name))
schedule = Scheduler(protocol)
schedule.call_later(self.duration, self.end, S_RESULT_TIMED_OUT)
schedule.loop_call(30.0, self.send_chat_update)
self.schedule = schedule
def vote(self, player):
if self.victim is player:
return
elif player in self.votes:
return
if self.public_votes:
self.protocol.send_chat(S_YES.format(player = player.name))
self.votes[player] = True
if self.votes_remaining <= 0:
# vote passed, ban or kick accordingly
victim = self.victim
self.end(S_RESULT_PASSED)
print '%s votekicked' % victim.name
if self.ban_duration > 0.0:
victim.ban(self.reason, self.ban_duration)
else:
victim.kick(silent = True)
def release(self):
self.instigator = None
self.victim = None
self.votes = None
if self.schedule:
self.schedule.reset()
self.schedule = None
self.protocol.votekick = None
def end(self, result):
self.ended = True
message = S_ENDED.format(victim = self.victim.name, result = result)
self.protocol.send_chat(message, irc = True)
if not self.instigator.admin:
self.instigator.last_votekick = seconds()
self.protocol.on_votekick_end()
self.release()
def send_chat_update(self, target = None):
# send only to target player if provided, otherwise broadcast to server
target = target or self.protocol
target.send_chat(S_UPDATE.format(instigator = self.instigator.name,
victim = self.victim.name, needed = self.votes_remaining))
target.send_chat(S_REASON.format(reason = self.reason))
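# ---------------------------------------------------------------------------
# Illustrative note (added for clarity; not part of the original script).
# VotekickProtocol.get_required_votes() below computes the YES-vote threshold
# as int(player_count / 100.0 * votekick_percentage), where player_count is
# the number of connected players minus one.  For example, with 12 connected
# players and the default 25 percent, int(11 * 0.25) == 2 votes are required.
# The numbers in the guarded demo are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _demo_players = 12
    _demo_percentage = 25.0
    print(int((_demo_players - 1) / 100.0 * _demo_percentage))  # prints 2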
def apply_script(protocol, connection, config):
Votekick.ban_duration = config.get('votekick_ban_duration', 15.0)
Votekick.public_votes = config.get('votekick_public_votes', True)
required_percentage = config.get('votekick_percentage', 25.0)
class VotekickProtocol(protocol):
votekick = None
def get_required_votes(self):
# votekicks are invalid if this returns <= 0
player_count = sum(not player.disconnected for player in
self.players.itervalues()) - 1
return int(player_count / 100.0 * required_percentage)
def on_map_leave(self):
if self.votekick:
self.votekick.release()
protocol.on_map_leave(self)
def on_ban(self, banee, reason, duration):
votekick = self.votekick
            if votekick and votekick.victim is banee:
                votekick.end(S_RESULT_BANNED)
            protocol.on_ban(self, banee, reason, duration)
def on_votekick_start(self, instigator, victim, reason):
pass
def on_votekick_end(self):
pass
class VotekickConnection(connection):
last_votekick = None
def on_disconnect(self):
votekick = self.protocol.votekick
if votekick:
if votekick.victim is self:
# victim leaves, gets votekick ban
reason = votekick.reason
votekick.end(S_RESULT_LEFT.format(victim = self.name))
self.ban(reason, Votekick.ban_duration)
elif votekick.instigator is self:
# instigator leaves, votekick is called off
s = S_RESULT_INSTIGATOR_LEFT.format(instigator = self.name)
votekick.end(s)
else:
# make sure we still have enough players
votekick.votes.pop(self, None)
if votekick.votes_remaining <= 0:
votekick.end(S_NOT_ENOUGH_PLAYERS)
connection.on_disconnect(self)
def kick(self, reason = None, silent = False):
votekick = self.protocol.votekick
if votekick:
if votekick.victim is self:
votekick.end(S_RESULT_KICKED)
elif votekick.instigator is self:
votekick.end(S_RESULT_INSTIGATOR_KICKED)
connection.kick(self, reason, silent)
    return VotekickProtocol, VotekickConnection
| Lensman/pysnip | feature_server/scripts/votekick.py | Python | gpl-3.0 | 10,354
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the tasks attribute containers."""
from __future__ import unicode_literals
import time
import unittest
import uuid
from plaso.containers import tasks
from tests import test_lib as shared_test_lib
class TaskTest(shared_test_lib.BaseTestCase):
"""Tests for the task attribute container."""
# TODO: replace by GetAttributeNames test
def testCopyToDict(self):
"""Tests the CopyToDict function."""
session_identifier = '{0:s}'.format(uuid.uuid4().hex)
task = tasks.Task(session_identifier=session_identifier)
self.assertIsNotNone(task.identifier)
self.assertIsNotNone(task.start_time)
self.assertIsNone(task.completion_time)
expected_dict = {
'aborted': False,
'has_retry': False,
'identifier': task.identifier,
'session_identifier': task.session_identifier,
'start_time': task.start_time}
test_dict = task.CopyToDict()
self.assertEqual(test_dict, expected_dict)
def testCreateRetryTask(self):
"""Tests the CreateRetryTask function."""
session_identifier = '{0:s}'.format(uuid.uuid4().hex)
task = tasks.Task(session_identifier=session_identifier)
task.path_spec = 'test_path_spec'
retry_task = task.CreateRetryTask()
self.assertNotEqual(retry_task.identifier, task.identifier)
self.assertTrue(task.has_retry)
self.assertFalse(retry_task.has_retry)
self.assertEqual(retry_task.path_spec, task.path_spec)
def testCreateTaskCompletion(self):
"""Tests the CreateTaskCompletion function."""
session_identifier = '{0:s}'.format(uuid.uuid4().hex)
task = tasks.Task(session_identifier=session_identifier)
task_completion = task.CreateTaskCompletion()
self.assertIsNotNone(task_completion)
def testCreateTaskStart(self):
"""Tests the CreateTaskStart function."""
session_identifier = '{0:s}'.format(uuid.uuid4().hex)
task = tasks.Task(session_identifier=session_identifier)
task_start = task.CreateTaskStart()
self.assertIsNotNone(task_start)
def testUpdateProcessingTime(self):
"""Tests the UpdateProcessingTime function."""
session_identifier = '{0:s}'.format(uuid.uuid4().hex)
task = tasks.Task(session_identifier=session_identifier)
self.assertIsNone(task.last_processing_time)
task.UpdateProcessingTime()
self.assertIsNotNone(task.last_processing_time)
class TaskCompletionTest(shared_test_lib.BaseTestCase):
"""Tests for the task completion attribute container."""
# TODO: replace by GetAttributeNames test
def testCopyToDict(self):
"""Tests the CopyToDict function."""
session_identifier = '{0:s}'.format(uuid.uuid4().hex)
timestamp = int(time.time() * 1000000)
task_identifier = '{0:s}'.format(uuid.uuid4().hex)
task_completion = tasks.TaskCompletion(
identifier=task_identifier, session_identifier=session_identifier)
task_completion.timestamp = timestamp
self.assertEqual(task_completion.identifier, task_identifier)
expected_dict = {
'aborted': False,
'identifier': task_completion.identifier,
'session_identifier': task_completion.session_identifier,
'timestamp': timestamp}
test_dict = task_completion.CopyToDict()
self.assertEqual(test_dict, expected_dict)
class TaskStartTest(shared_test_lib.BaseTestCase):
"""Tests for the task start attribute container."""
# TODO: replace by GetAttributeNames test
def testCopyToDict(self):
"""Tests the CopyToDict function."""
session_identifier = '{0:s}'.format(uuid.uuid4().hex)
timestamp = int(time.time() * 1000000)
task_identifier = '{0:s}'.format(uuid.uuid4().hex)
task_start = tasks.TaskStart(
identifier=task_identifier, session_identifier=session_identifier)
task_start.timestamp = timestamp
self.assertEqual(task_start.identifier, task_identifier)
expected_dict = {
'identifier': task_start.identifier,
'session_identifier': session_identifier,
'timestamp': timestamp}
test_dict = task_start.CopyToDict()
self.assertEqual(test_dict, expected_dict)
if __name__ == '__main__':
unittest.main()
| rgayon/plaso | tests/containers/tasks.py | Python | apache-2.0 | 4,191 |
# This file is part of beets.
# Copyright 2012, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# MODIFIED TO WORK WITH HEADPHONES!!
#
__version__ = '1.0b15'
__author__ = 'Adrian Sampson <[email protected]>'
from lib.beets.library import Library
| jimyx17/jimh | lib/beets/__init__.py | Python | gpl-3.0 | 801 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'StatusCheckResult', fields ['time']
db.create_index('cabotapp_statuscheckresult', ['time'])
def backwards(self, orm):
# Removing index on 'StatusCheckResult', fields ['time']
db.delete_index('cabotapp_statuscheckresult', ['time'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cabotapp.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service'},
'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'cabotapp.servicestatussnapshot': {
'Meta': {'object_name': 'ServiceStatusSnapshot'},
'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': "orm['cabotapp.Service']"}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'cabotapp.shift': {
'Meta': {'object_name': 'Shift'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'uid': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabotapp.statuscheck': {
'Meta': {'ordering': "['name']", 'object_name': 'StatusCheck'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cached_health': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'calculated_status': ('django.db.models.fields.CharField', [], {'default': "'passing'", 'max_length': '50', 'blank': 'True'}),
'check_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'debounce': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'endpoint': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'expected_num_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'default': "'ERROR'", 'max_length': '30'}),
'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'max_queued_build_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'metric': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'password': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_cabotapp.statuscheck_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'status_code': ('django.db.models.fields.TextField', [], {'default': '200', 'null': 'True'}),
'text_match': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True'}),
'username': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'verify_ssl_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'cabotapp.statuscheckresult': {
'Meta': {'object_name': 'StatusCheckResult'},
'check': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabotapp.StatusCheck']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'succeeded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'time_complete': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'})
},
'cabotapp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'fallback_alert_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hipchat_alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cabotapp']
| iurisilvio/cabot | cabot/cabotapp/migrations/0004_auto.py | Python | mit | 10,656 |
#!/usr/bin/env python
from datetime import date
class DataFileWriter:
def __init__(self, filename_base):
self.filename_base = filename_base
def get_filename(self):
return self.filename_base +"_" + date.today().strftime("%Y-%m-%d") + ".csv"
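    # Example (illustrative, not from the original code): with filename_base
    # "temps", a call made on 2016-05-01 returns "temps_2016-05-01.csv", so
    # each day's readings land in their own CSV file.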
def append(self, value):
with open(self.get_filename(), 'a+') as outfile:
            print>>outfile, str(value)
| nosyjoe/tempomat | src/file_writer.py | Python | mit | 409
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Subvention.unit_blank_user'
db.add_column(u'accounting_tools_subvention', 'unit_blank_user',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['users.TruffeUser'], null=True, blank=True),
keep_default=False)
# Adding field 'Subvention.unit_blank_name'
db.add_column(u'accounting_tools_subvention', 'unit_blank_name',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Changing field 'Subvention.unit'
db.alter_column(u'accounting_tools_subvention', 'unit_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['units.Unit'], null=True))
def backwards(self, orm):
# Deleting field 'Subvention.unit_blank_user'
db.delete_column(u'accounting_tools_subvention', 'unit_blank_user_id')
# Deleting field 'Subvention.unit_blank_name'
db.delete_column(u'accounting_tools_subvention', 'unit_blank_name')
# Changing field 'Subvention.unit'
db.alter_column(u'accounting_tools_subvention', 'unit_id', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['units.Unit']))
models = {
u'accounting_core.accountingyear': {
'Meta': {'object_name': 'AccountingYear'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_preparing'", 'max_length': '255'}),
'subvention_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting_tools.subvention': {
'Meta': {'unique_together': "(('unit', 'accounting_year'),)", 'object_name': 'Subvention'},
'accounting_year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting_core.AccountingYear']"}),
'amount_asked': ('django.db.models.fields.SmallIntegerField', [], {}),
'amount_given': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment_root': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'mobility_asked': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mobility_given': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'unit_blank_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'unit_blank_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']", 'null': 'True', 'blank': 'True'})
},
u'accounting_tools.subventionline': {
'Meta': {'object_name': 'SubventionLine'},
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nb_spec': ('django.db.models.fields.SmallIntegerField', [], {}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
u'accounting_tools.subventionlogging': {
'Meta': {'object_name': 'SubventionLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['accounting_tools.Subvention']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'units.unit': {
'Meta': {'object_name': 'Unit'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'is_commission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_equipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'users.truffeuser': {
'Meta': {'object_name': 'TruffeUser'},
'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
    complete_apps = ['accounting_tools']
| ArcaniteSolutions/truffe2 | truffe2/accounting_tools/migrations/0003_auto__add_field_subvention_unit_blank_user__add_field_subvention_unit_.py | Python | bsd-2-clause | 10,405
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Installs and configures Ironic
"""
from packstack.installer import utils
from packstack.installer import validators
from packstack.installer import processors
from packstack.modules.shortcuts import get_mq
from packstack.modules.ospluginutils import appendManifestFile
from packstack.modules.ospluginutils import createFirewallResources
from packstack.modules.ospluginutils import getManifestTemplate
# ------------------ Ironic Packstack Plugin initialization ------------------
PLUGIN_NAME = "OS-Ironic"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
ironic_params = [
{"CONF_NAME": "CONFIG_IRONIC_DB_PW",
"CMD_OPTION": "os-ironic-db-passwd",
"PROMPT": "Enter the password for the Ironic DB user",
"USAGE": "The password to use for the Ironic DB access",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "PW_PLACEHOLDER",
"PROCESSORS": [processors.process_password],
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"USE_DEFAULT": True,
"NEED_CONFIRM": True,
"CONDITION": False},
{"CONF_NAME": "CONFIG_IRONIC_KS_PW",
"CMD_OPTION": "os-ironic-ks-passwd",
"USAGE": ("The password to use for Ironic to authenticate "
"with Keystone"),
"PROMPT": "Enter the password for Ironic Keystone access",
"OPTION_LIST": [],
"VALIDATORS": [validators.validate_not_empty],
"DEFAULT_VALUE": "PW_PLACEHOLDER",
"PROCESSORS": [processors.process_password],
"MASK_INPUT": True,
"LOOSE_VALIDATION": False,
"USE_DEFAULT": True,
"NEED_CONFIRM": True,
"CONDITION": False},
]
ironic_group = {"GROUP_NAME": "IRONIC",
"DESCRIPTION": "Ironic Options",
"PRE_CONDITION": "CONFIG_IRONIC_INSTALL",
"PRE_CONDITION_MATCH": "y",
"POST_CONDITION": False,
"POST_CONDITION_MATCH": True}
controller.addGroup(ironic_group, ironic_params)
def initSequences(controller):
if controller.CONF['CONFIG_IRONIC_INSTALL'] != 'y':
return
steps = [
{'title': 'Adding Ironic Keystone manifest entries',
'functions': [create_keystone_manifest]},
{'title': 'Adding Ironic manifest entries',
'functions': [create_manifest]},
]
controller.addSequence("Installing OpenStack Ironic", [], [],
steps)
# -------------------------- step functions --------------------------
def create_manifest(config, messages):
if config['CONFIG_UNSUPPORTED'] != 'y':
config['CONFIG_STORAGE_HOST'] = config['CONFIG_CONTROLLER_HOST']
manifestfile = "%s_ironic.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate(get_mq(config, "ironic"))
manifestdata += getManifestTemplate("ironic.pp")
fw_details = dict()
key = "ironic-api"
fw_details.setdefault(key, {})
fw_details[key]['host'] = "ALL"
fw_details[key]['service_name'] = "ironic-api"
fw_details[key]['chain'] = "INPUT"
fw_details[key]['ports'] = ['6385']
fw_details[key]['proto'] = "tcp"
config['FIREWALL_IRONIC_API_RULES'] = fw_details
manifestdata += createFirewallResources('FIREWALL_IRONIC_API_RULES')
appendManifestFile(manifestfile, manifestdata, 'pre')
def create_keystone_manifest(config, messages):
if config['CONFIG_UNSUPPORTED'] != 'y':
config['CONFIG_IRONIC_HOST'] = config['CONFIG_CONTROLLER_HOST']
manifestfile = "%s_keystone.pp" % config['CONFIG_CONTROLLER_HOST']
manifestdata = getManifestTemplate("keystone_ironic.pp")
appendManifestFile(manifestfile, manifestdata)
| palmer159/openstack-test | packstack/plugins/ironic_275.py | Python | apache-2.0 | 4,394 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#use it to generate a key table between date1 and date2
import datetime,os,subprocess,shlex,time
import thread
import threading
from hashlib import md5
import pickle
from cStringIO import StringIO
ratio_integer_value=60 #crucial parameter: seconds per key. 1 means one key per second, 30 one key per 30 s, 60 one key per minute (the closer to 1, the larger the key-table object gets)
ratio=float(float(1)/float(ratio_integer_value))
class FuncThread(threading.Thread):
def __init__(self, target, *args):
self._target = target
self._args = args
threading.Thread.__init__(self)
def run(self):
self._target(*self._args)
def checking_dirpath_content_lenght(path,content_len):
if len(os.listdir(path))==content_len:
return True
else:
return False
def wainting_to_make_obj(path,content_len,day_list_keyname):
while checking_dirpath_content_lenght(path,content_len)==False:
print "waiting all key for the day "+path
time.sleep(1)
print "ok ! "
final_obj=[]
for each_k in day_list_keyname:
tempkey=""
with open(path+os.sep+each_k[2]+'.pem','rb') as filekey:
tempkey=filekey.read()
tempkey=tempkey.replace('-----BEGIN RSA PRIVATE KEY-----\n','').replace('\n-----END RSA PRIVATE KEY-----\n','').replace('\n','')
key_line=[each_k[0],each_k[1],tempkey]
final_obj.append(key_line)
    #all keys for the day exist; pickle the day's key table to data/<day>.pkl
output = open("data"+os.sep+part_a+'.pkl', 'wb')
pickle.dump(final_obj, output, -1)
output.close()
def runnowait(mycommand):
args=shlex.split(mycommand)
p=subprocess.Popen(args)
d = datetime.datetime.strptime('04 Aug 2013', '%d %b %Y')
e = datetime.datetime.strptime('05 Aug 2013', '%d %b %Y')
delta = datetime.timedelta(days=2)
path="data"
try:
os.mkdir(path)
except:
pass
cp_t=0
th_l=[]
while d <= e:
day_table=[]
day_list_keyname=[]
part_a = d.strftime("%y%m%d")
try:
os.mkdir(path+os.sep+part_a)
except:
pass
for i in range(0,int(86400*ratio)):
print "Generating "+str(i)
part_b = str(int(i*(1/ratio))).zfill(5)
day_table.append([part_a,part_b,[]])
        #generate the key for this time slot, skipping slots whose .pem file already exists
keyname=md5(part_a+part_b).hexdigest()
day_list_keyname.append([part_a,part_b,keyname])
filename=path+os.sep+part_a+os.sep+keyname+'.pem'
try:
with open(filename): pass
except IOError:
#print 'Oh dear.'
runnowait('openssl genrsa -out '+filename+' 2048')
pass
#print checking_dirpath_content_lenght()
th_l.append(FuncThread(wainting_to_make_obj, path+os.sep+part_a,len(day_list_keyname),day_list_keyname))
th_l[cp_t].start()
d += delta
cp_t=cp_t+1
print "exit"
for i in range(0,len(th_l)):
th_l[i].join()
print "ending"
| cellus-sas/heavy-encypted-chat | serverside_generating_pwd_table/run.py | Python | mit | 2,750 |
# Get the default preferences setup
DEFAULT_PREFERENCES = {'multiprocessing': False, # Should be False for non-qsub
'ram_integrate': False,
"flip_beam" : True, # Needed for XDS
"analysis": True, #Run analysis on processed data
"pdbquery": True, #Run pdbquery on processed data
"clean_up": True, # clean up
"json": True, # send output as json back to DB
"show_plots": False, # plots for command line
"progress": False, # progress bar for command line
"spacegroup_decider": 'auto', # choices=["auto", "pointless", "xds"],
"computer_cluster": True,
#"rounds_polishing": 1, # not used yet...
}
                       }
| RAPD/RAPD | src/plugins/integrate/info.py | Python | agpl-3.0 | 866
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***********************************
espressopp.integrator.DPDThermostat
***********************************
.. function:: espressopp.integrator.DPDThermostat(system, vl)
        :param system: the ESPResSo++ system the thermostat acts on
        :param vl: Verlet list used to determine interacting particle pairs
        :type system: espressopp.System
        :type vl: espressopp.VerletList
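        Example (illustrative sketch, not from the original documentation; it
        assumes an already-configured ``system``, a Verlet list ``vl`` and a
        velocity-Verlet ``integrator``)::
            thermostat = espressopp.integrator.DPDThermostat(system, vl)
            thermostat.gamma = 1.0
            thermostat.temperature = 1.0
            integrator.addExtension(thermostat)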
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_DPDThermostat
class DPDThermostatLocal(ExtensionLocal, integrator_DPDThermostat):
def __init__(self, system, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_DPDThermostat, system, vl)
#def enableAdress(self):
# if pmi.workerIsActive():
# self.cxxclass.enableAdress(self);
if pmi.isController :
class DPDThermostat(Extension, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.integrator.DPDThermostatLocal',
pmiproperty = [ 'gamma', 'tgamma', 'temperature' ]
)
| espressopp/espressopp | src/integrator/DPDThermostat.py | Python | gpl-3.0 | 1,962 |
'''
Split the regridded HI data from 14B-088 into individual channels.
'''
from casa_tools import ms_split_by_channel
vis = "/global/scratch/ekoch/combined/14B-088_HI_LSRK_AT0206_regrid.ms.contsub"
output_dir = "/global/scratch/ekoch/combined/14B-088_channel_ms/"
start_chan = 11
nchan = 205
ms_split_by_channel(vis, nchan=nchan, start=start_chan,
output_dir=output_dir, datacolumn='DATA')
| e-koch/VLA_Lband | 14B-088/HI/archival_combination/14B-088_channel_split.py | Python | mit | 415 |
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
return configuration
class FakeVolume(object):
def __init__(self, size=0):
self.size = size
self.id = hash(self)
self.name = None
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
def setUp(self):
super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
setattr(drv, 'ssc_enabled', False)
mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
netapp_nfs.NetAppDirectNfsDriver._check_flags()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_get_if_info_by_ip')
mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
mox.StubOutWithMock(drv, '_clone_file')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
drv._get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
drv._clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._post_prov_deprov_in_ssc(IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return api.NaElement(response_el).get_children()
def test_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
share = 'ip:/share'
drv._clone_volume(volume_name, clone_name, volume_id, share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
if not result:
pass
else:
self.fail('Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if (share == 'testshare' and file_name == 'img-cache-id'):
pass
else:
LOG.warn(_("Share %(share)s and file name %(file_name)s")
% {'share': share, 'file_name': file_name})
self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == 0:
pass
else:
self.fail('No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
if len(res) == len(r_files):
for f in res:
r_files.remove(f)
else:
self.fail('Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._delete_file('/mnt/img-cache-2').AndReturn(True)
drv._delete_file('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
if not drv.cleaning:
pass
else:
self.fail('Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image(volume, ('image_location', None), 'image_id', {})
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
def test_clone_image_cloneableshare_nospace(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
mox.ReplayAll()
        (prop, cloned) = drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
if not cloned and not prop['provider_location']:
pass
else:
self.fail('Expected not cloned, got cloned.')
def test_clone_image_cloneableshare_raw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('raw'))
drv._clone_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
        drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_cloneableshare_notraw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
        drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_file_not_discovered(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
        vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_clone_image_resizefails(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file(
IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
        vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
[{'metadata':
{'share_location': 'nfs://host/path',
'mount_point': '/opt/stack/data/glance',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id'}])
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {}
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
volume_info = self._driver.create_volume(FakeVolume(1))
self.assertEqual(volume_info.get('provider_location'),
fake_share)
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_volume = FakeVolume(1)
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
with mock.patch.object(drv,
'_set_qos_policy_group_on_volume'
) as mock_set_qos:
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual(volume_info.get('provider_location'),
'localhost:myshare')
mock_set_qos.assert_called_once_with(fake_volume,
fake_share,
fake_qos_policy)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value="")
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
        drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.168.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file.call_count == 2
        assert drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
"""Test direct NetApp C Mode driver."""
def _custom_setup(self):
self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
configuration=create_configuration())
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_check_for_setup_error_version(self):
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
# check exception raises when version not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
drv._client.set_api_version(1, 8)
# check exception raises when not supported version
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
drv._client.set_api_version(1, 9)
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
mox.StubOutWithMock(drv, '_get_actual_path_for_export')
mox.StubOutWithMock(drv, '_start_clone')
mox.StubOutWithMock(drv, '_wait_for_clone_finish')
if status == 'fail':
mox.StubOutWithMock(drv, '_clear_clone')
drv._get_export_ip_path(
IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
if status == 'fail':
drv._wait_for_clone_finish('1', '2').AndRaise(
api.NaApiError('error', 'error'))
drv._clear_clone('1')
else:
drv._wait_for_clone_finish('1', '2')
return mox
def test_clone_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
try:
drv._clone_volume(volume_name, clone_name, volume_id)
except Exception as e:
if isinstance(e, api.NaApiError):
pass
else:
raise
mox.VerifyAll()
| theanalyst/cinder | cinder/tests/test_netapp_nfs.py | Python | apache-2.0 | 47,748 |
from .instruction import Instruction
from .level import Level
from .block import Block
from .toolbox import Toolbox
from .category import Category
from .task import Task
| adaptive-learning/flocs-web | tasks/models/__init__.py | Python | gpl-3.0 | 170 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
Main function.
"""
import os
import sys
import cherrypy
from core import Core
#-------------------------------------------------------------------------------
def CORS():
"""
    Set the CORS header so responses can be consumed from any origin.
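
    A sketch of enabling it for an application through the CherryPy config;
    ``app`` stands in for whatever root object is mounted (the tool itself is
    registered below as ``cherrypy.tools.CORS``)::

        cherrypy.quickstart(app, config={'/': {'tools.CORS.on': True}})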
"""
cherrypy.response.headers["Access-Control-Allow-Origin"] = "*" # mean: CORS to
#-------------------------------------------------------------------------------
def err_tb():
"""
replace the default error response
    with a cgitb HTML traceback
"""
import cgitb
tb = cgitb.html(sys.exc_info())
def set_tb():
""" set the traceback output """
cherrypy.response.body = tb
cherrypy.response.headers['Content-Length'] = None
cherrypy.request.hooks.attach('after_error_response', set_tb)
if __name__ == '__main__':
cherrypy.tools.CORS = cherrypy.Tool('before_handler', CORS)
## config file and location settings
CONF_FILE_REL = sys.argv[1] if len(sys.argv) == 2 and os.path.isfile(sys.argv[1]) else "core.conf"
LOCAL_CONF_REL = "local.conf"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
CONF_FILE_ABS = os.path.join(BASE_DIR, CONF_FILE_REL)
LOCAL_CONF_FILE = os.path.join(BASE_DIR, LOCAL_CONF_REL)
if not os.path.isfile(LOCAL_CONF_FILE):
print("Error: the conf file is missing, ")
sys.exit(-1)
if not os.path.isfile(CONF_FILE_ABS):
print("Error: the conf file is missing, ")
sys.exit(-1)
cherrypy.config.update(CONF_FILE_ABS)
cherrypy.config.update(LOCAL_CONF_FILE)
cherrypy.log.error_log.setLevel('ERROR')
cherrypy.tools.cgitb = cherrypy.Tool('before_error_response', err_tb)
cherrypy.quickstart(Core.get_instance(), config=CONF_FILE_ABS)
| mcolom/ipolDevel | ipol_demo/modules/core/main.py | Python | agpl-3.0 | 2,239 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a user by its id.
To create users, run create_user.py."""
# Import appropriate modules from the client library.
from googleads import dfp
USER_ID = 'INSERT_USER_ID_TO_FIND_HERE'
def main(client, user_id):
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201508')
# Create query.
values = [{
'key': 'userId',
'value': {
'xsi_type': 'NumberValue',
'value': user_id
}
}]
query = 'WHERE id = :userId'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
# Get users by statement.
response = user_service.getUsersByStatement(statement.ToStatement())
users = response['results'] if 'results' in response else []
for user in users:
# Display results.
print ('User with id \'%s\', email \'%s\', and role \'%s\' was found.'
% (user['id'], user['email'], user['roleName']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, USER_ID)
| richardfergie/googleads-python-lib | examples/dfp/v201508/user_service/get_users_by_statement.py | Python | apache-2.0 | 1,698 |
"""SCons.Tool.BitKeeper.py
Tool-specific initialization for the BitKeeper source code control
system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
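
A hedged usage sketch of the old source-code fetching interface this tool
supports (both the tool and SourceCode() are deprecated)::

    env = Environment(tools=['BitKeeper'])
    env.SourceCode('.', env.BitKeeper())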
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/BitKeeper.py 2013/03/03 09:48:35 garyo"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
BitKeeper to an Environment."""
def BitKeeperFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The BitKeeper() factory is deprecated and there is no replacement.""")
act = SCons.Action.Action("$BITKEEPERCOM", "$BITKEEPERCOMSTR")
return SCons.Builder.Builder(action = act, env = env)
#setattr(env, 'BitKeeper', BitKeeperFactory)
env.BitKeeper = BitKeeperFactory
env['BITKEEPER'] = 'bk'
env['BITKEEPERGET'] = '$BITKEEPER get'
env['BITKEEPERGETFLAGS'] = SCons.Util.CLVar('')
env['BITKEEPERCOM'] = '$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET'
def exists(env):
return env.Detect('bk')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| aubreyrjones/libesp | scons_local/scons-local-2.3.0/SCons/Tool/BitKeeper.py | Python | mit | 2,498 |
#!/usr/bin/env python
import socket,sys,gateway_cfg,select,socketserver,http.server,urllib
from threading import *
class WebHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
parseParams = urllib.parse.urlparse(self.path)
if parseParams.path=="/t" :
self.send_error(404,"You can't pass!!")
else:
self.send_response(200)
            self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write("Hello World!!")
self.wfile.close()
class webserver (Thread):
def __init__(self,condition):
#init
Thread.__init__(self)
self.con = condition
def run(self):
#run
print("web server start!!")
Handler = WebHandler
httpd = http.server.HTTPServer(("", 8080), Handler)
httpd.serve_forever()
class msgcenter (Thread):
def __init__(self,condition):
#init server setting
Thread.__init__(self)
self.con = condition
try:
print("start config")
self.server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.server.bind((gateway_cfg.address['host'],gateway_cfg.address['port']))
self.server.listen(gateway_cfg.max_user)
self.break_out = False
except socket.error as msg:
print("[ERROR] %s\n" % msg)
self.break_out = True
def run(self):
#start
if self.break_out == False:
print("msgcenter start!!")
while True:
try:
connection,address = self.server.accept()
connection.setblocking(0)
connection.close()
except IOError as e:
if e.errno == 11:
raise
else:
print("socket error")
exit(-1)
| fucxy/fucxy-node | gateway/modules.py | Python | gpl-3.0 | 1,672 |
#!/usr/bin/env python
#coding:utf-8
import sys,os
from toughportal.common import utils
import shutil
import time
import random
import ConfigParser
def gen_secret(clen=32):
rg = random.SystemRandom()
r = list('1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
return ''.join([rg.choice(r) for _ in range(clen)])
| talkincode/ToughPORTAL | toughportal/common/secret.py | Python | agpl-3.0 | 344 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import csv
from datetime import datetime
import os
import argparse
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import and_
from constants import CONST
from models import Marker
from utilities import init_flask
import importmail
from xml.dom import minidom
import math
import requests
import logging
############################################################################################
# United.py is responsible for the parsing and deployment of "united hatzala" data to the DB
############################################################################################
PROVIDER_CODE = CONST.UNITED_HATZALA_CODE
TIME_ZONE = 2
# convert IMS hours code to hours
RAIN_DURATION_CODE_TO_HOURS = {"1": 6, "2": 12, "3": 18, "4": 24, "/": 24, "5": 1, "6": 2, "7": 3, "8": 9, "9": 15}
WEATHER = {"0": 1, "1": 2, "3": 3, "4": 4, "5": 5, "7": 6, "8": 6, "9": 7, "10": 8, "11": 9,
"12": 10, "17": 11, "18": 12, "19": 13, "20": 14, "21": 15, "22": 16, "23": 17, "24": 18,
"25": 19, "26": 20, "27": 21, "28": 22, "29": 23, "30": 24, "31": 24, "32": 24, "33": 7,
"34": 7, "35": 7, "36": 25, "37": 25, "38": 25, "39": 25, "40": 26, "41": 27, "42": 28,
"43": 29, "44": 9, "45": 30, "46": 30, "47": 30, "48": 31, "49": 32, "50": 33, "51": 34,
"52": 33, "53": 35, "54": 36, "55": 37, "56": 38, "57": 39, "58": 37, "59": 37, "61": 37, "60": 36,
"62": 40, "63": 15, "64": 41, "65": 19, "66": 42, "67": 43, "68": 44, "69": 45, "70": 46, "71": 47,
"72": 48, "73": 16, "74": 50, "75": 51, "76": 52, "77": 53, "78": 54, "79": 55, "80": 56, "81": 57,
"82": 58, "83": 59, "84": 60, "85": 61, "86": 62, "87": 63, "88": 64, "89": 65, "90": 66, "91": 67,
"92": 68, "93": 69, "94": 70, "95": 71, "96": 72, "97": 73, "98": 74, "99": 75}
def retrieve_ims_xml():  # get an XML document from the IMS (Israel Meteorological Service) website
logging.basicConfig(level=logging.DEBUG)
s = requests.session()
r = s.get('http://www.ims.gov.il/ims/PublicXML/observ.xml')
xml_doc = minidom.parseString(r.text)
collection = xml_doc.documentElement
return collection
def parse_date(created):
"""
:param created: Date & Time string from csv
:return: Python datetime object
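
    Illustrative example (hypothetical input in one of the accepted formats;
    a trailing PM shifts the result to a 24-hour clock)::

        parse_date('1/2/2016 3:04:05 PM')  # -> datetime(2016, 1, 2, 15, 4, 0)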
"""
global time
global hour
DATE_FORMATS = ['%m/%d/%Y %I:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %I:%M:%S', '%d/%m/%Y %I:%M', '%Y/%m/%d %I:%M', '%m/%d/%Y %I:%M']
for date_format in DATE_FORMATS:
try:
if date_format == '%Y-%m-%d %H:%M:%S':
time = datetime.strptime(str(created)[:-4], date_format)
hour = time.strftime('%H')
hour = int(hour)
else:
time = datetime.strptime(str(created)[:-3], date_format)
hour = time.strftime('%H')
hour = int(hour) if str(created).endswith('AM') else int(hour) + 12
break
except ValueError:
pass
return datetime(time.year, time.month, time.day, hour, time.minute, 0)
def is_nth_weekday(nth, daynum, year,
                   month):  # find if date is the nth occurrence of the daynum day of the week (e.g. the fourth Sunday of April 2016)
# start counting the daynum from monday = 0
return calendar.Calendar(nth).monthdatescalendar(
year,
month
)[nth][daynum]
def get_parent_object_node(node):
while node.parentNode:
node = node.parentNode
if node.nodeName == "Object":
return node
def accident_time_zone_adjustment(created): # return accident time in UTC time
accident_date = parse_date(created)
daylight_saving_time = is_nth_weekday(4, 4, accident_date.year, 3)
winter_clock = is_nth_weekday(4, 6, accident_date.year, 10)
# weather is given in UTC time
# therefore in daylight_saving_time we deduct 3 hours from the local time and in winter clock 2 hours
# [
accident_date = accident_date.replace(hour=accident_date.hour - TIME_ZONE)
    # if the accident happened between April and September
    if 3 < accident_date.month < 10:
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
    # if the accident happened before the last Sunday of October at 2:00
    elif accident_date.month == 10 and (
            winter_clock.day > accident_date.day or (
                winter_clock.day == accident_date.day and accident_date.hour < 2)):
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
    # if the accident happened after the last Friday of March at 2:00
    elif accident_date.month == 3 and (
            daylight_saving_time.day < accident_date.day or (
                daylight_saving_time.day == accident_date.day and accident_date.hour >= 2)):
        accident_date = accident_date.replace(hour=accident_date.hour - 1)
# ]
adate = ''.join(
(str(accident_date.year), str(accident_date.month), str(accident_date.day), str(accident_date.hour)))
return adate
def all_station_in_date_frame(collection, created): # return the stations data in the time of the accident
doc = minidom.Document()
base = doc.createElement('accident_date')
doc.appendChild(base)
station_data_in_date = collection.getElementsByTagName('date_selected')
station_data_in_date.sort()
accident_date = accident_time_zone_adjustment(created)
    for station in station_data_in_date:
if accident_date in str(station.childNodes[0].nodeValue):
base.appendChild(get_parent_object_node(station))
return base
def find_station_by_coordinate(collection, latitude, longitude):
station_place_in_xml = -1
min_distance = float("inf") # initialize big starting value so the distance will always be smaller than the initial
station_data = collection.getElementsByTagName('surface_station')
for i, station in enumerate(station_data):
station_lon = station.getElementsByTagName('station_lon')
assert len(station_lon) == 1
lon = float(station_lon[0].childNodes[0].nodeValue)
lon_difference = (lon - float(longitude)) ** 2
station_lat = station.getElementsByTagName('station_lat')
assert len(station_lat) == 1
lat = float(station_lat[0].childNodes[0].nodeValue)
lat_difference = (lat - float(latitude)) ** 2
temp_dis = math.sqrt(lat_difference + lon_difference)
if temp_dis < min_distance:
min_distance = temp_dis
station_place_in_xml = i
return station_place_in_xml
def convert_xml_values_to_numbers(rain):
    num_conv = rain[:2]  # leading characters checked for zero padding
    for char in num_conv:  # in the xml, numbers use a three-digit format (4 -> 004), so strip the leading zeroes
        if char == '0':
            rain = rain.replace(char, '', 1)
        else:
            break
rain_in_millimeters = float(rain)
if rain_in_millimeters >= 990:
# numbers that are higher then 990 in the xml code equals 0.(the last digit) for example 991 = 0.1
rain_in_millimeters *= 0.01
return rain_in_millimeters
def get_weather_element(station, weather_data, tag):
element = weather_data[station].getElementsByTagName(tag)
if element:
weather_element = element[0].childNodes[0].nodeValue
else:
weather_element = None
return weather_element
def process_weather_data(collection, latitude, longitude):
weather = 1 # default weather is clear sky
station = find_station_by_coordinate(collection, latitude, longitude)
weather_data = collection.getElementsByTagName('surface_observation')
wind_force = get_weather_element(station, weather_data, 'FF')
rain = get_weather_element(station, weather_data, 'RRR')
rain_duration = get_weather_element(station, weather_data,
'TR') # the duration of time in which the rain amount was measured
weather_code = get_weather_element(station, weather_data, 'WW')
if weather_code is not None:
return WEATHER[weather_code.strip()]
if wind_force is not None:
        if int(wind_force) > 8:
            weather = 76  # windstorm
        elif int(wind_force) > 5:
            weather = 77  # strong winds
if rain is not None and rain_duration is not None:
rain_in_millimeters = convert_xml_values_to_numbers(rain)
rain_hours = RAIN_DURATION_CODE_TO_HOURS[str(rain_duration).strip()]
# rain amount is between 0.1 and 0.5 millimeter
if 0.0 < rain_in_millimeters <= 0.5 or (
0.0 < rain_in_millimeters / rain_hours <= 0.5):
            if weather == 76:
                weather = 80  # windstorm, light rain
            elif weather == 77:
                weather = 84  # strong winds, light rain
            else:
                weather = 37  # light rain
# average rain amount per hour is between 0.5 and 4.0 millimeters
if 0.5 < rain_in_millimeters / rain_hours <= 4:
            if weather == 76:
                weather = 81  # rain and windstorm
            elif weather == 77:
                weather = 85  # rain and strong winds
            else:
                weather = 15  # rain
# average rain amount per hour is between 4.0 and 8.0 millimeters
elif 4 < rain_in_millimeters / rain_hours <= 8:
            if weather == 76:
                weather = 82  # windstorm, heavy rain
            elif weather == 77:
                weather = 86  # strong winds, heavy rain
            else:
                weather = 78  # heavy rain
# average rain amount per hour is more than 8.0 millimeters
elif rain_in_millimeters / rain_hours > 8:
            if weather == 76:
                weather = 83  # windstorm, torrential rain
            elif weather == 77:
                weather = 87  # strong winds, torrential rain
            else:
                weather = 79  # torrential rain
return weather
CSVMAP = [
{"id": 0, "time": 1, "lat": 2, "long": 3, "street": 4, "city": 6, "comment": 7, "type": 8, "casualties": 9},
{"id": 0, "time": 1, "type": 2, "long": 3, "lat": 4, "city": 5, "street": 6, "comment": 7, "casualties": 8},
]
def create_accidents(collection, file_location):
"""
    :param collection: IMS weather observations XML (as returned by retrieve_ims_xml())
    :param file_location: local location of .csv
    :return: Yields a marker object with every iteration
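
    A consumption sketch (the csv path here is a hypothetical example; main()
    below feeds this generator the real files under static/data/united/)::

        collection = retrieve_ims_xml()
        for marker in create_accidents(collection, 'static/data/united/sample.csv'):
            logging.info(marker['id'])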
"""
logging.info("\tReading accidents data from '%s'..." % file_location)
with open(file_location, 'rU') as f:
reader = csv.reader(f, delimiter=',', dialect=csv.excel_tab)
for line, accident in enumerate(reader):
if line == 0: # header
format_version = 0 if "MissionID" in accident[0] else 1
continue
if not accident: # empty line
continue
if line == 1 and accident[0] == "":
logging.warn("\t\tEmpty File!")
continue
csvmap = CSVMAP[format_version]
if accident[csvmap["lat"]] == "" or accident[csvmap["long"]] == "" or \
accident[csvmap["lat"]] is None or accident[csvmap["long"]] is None or \
accident[csvmap["lat"]] == "NULL" or accident[csvmap["long"]] == "NULL":
logging.warn("\t\tMissing coordinates in line {0}. Moving on...".format(line + 1))
continue
created = parse_date(accident[csvmap["time"]])
marker = {'id': accident[csvmap["id"]], 'latitude': accident[csvmap["lat"]],
'longitude': accident[csvmap["long"]], 'created': created, 'provider_code': PROVIDER_CODE,
'title': unicode(accident[csvmap["type"]], encoding='utf-8')[:100],
'address': unicode((accident[csvmap["street"]] + ' ' + accident[csvmap["city"]]),
encoding='utf-8'),
'severity': 2 if u"קשה" in unicode(accident[csvmap["type"]], encoding='utf-8') else 3,
'locationAccuracy': 1, 'subtype': 21, 'type': CONST.MARKER_TYPE_ACCIDENT,
'description': unicode(accident[csvmap["comment"]], encoding='utf-8'),
'weather': process_weather_data(collection, accident[csvmap["lat"]],
accident[csvmap["long"]])}
if format_version == 0:
casualties = accident[csvmap["casualties"]]
marker['intactness'] = casualties if casualties.isdigit() else 0
yield marker
def import_to_db(collection, path):
"""
:param path: Local files directory ('united_path' on main() below)
:return: length of DB entries after execution
"""
app = init_flask(__name__)
db = SQLAlchemy(app)
accidents = list(create_accidents(collection, path))
if not accidents:
return 0
new_ids = [m["id"] for m in accidents
if 0 == Marker.query.filter(and_(Marker.id == m["id"],
Marker.provider_code == m["provider_code"])).count()]
if not new_ids:
logging.info("\t\tNothing loaded, all accidents already in DB")
return 0
db.session.execute(Marker.__table__.insert(), [m for m in accidents if m["id"] in new_ids])
db.session.commit()
return len(new_ids)
def update_db(collection):
"""
:return: length of DB entries after execution
"""
app = init_flask(__name__)
db = SQLAlchemy(app)
united = Marker.query.filter(Marker.provider_code == 2)
for accident in united:
if not accident.weather:
accident.weather = process_weather_data(collection, accident.latitude, accident.longitude)
db.session.commit()
logging.info("\tFinished commiting the changes")
def main():
"""
Calls importmail.py prior to importing to DB
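
    Typical invocations (sketch; the credentials are placeholders)::

        python united.py --light
        python united.py --username user@example.org --password secret --lastmail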
"""
parser = argparse.ArgumentParser()
parser.add_argument('--light', action='store_true',
help='Import without downloading any new files')
parser.add_argument('--username', default='')
parser.add_argument('--password', default='')
parser.add_argument('--lastmail', action='store_true')
parser.add_argument('--newformat', action='store_true',
help='Parse files using new format')
args = parser.parse_args()
collection = retrieve_ims_xml()
if not args.light:
logging.info("Importing data from mail...")
importmail.main(args.username, args.password, args.lastmail)
united_path = "static/data/united/"
total = 0
logging.info("Loading United accidents...")
for united_file in os.listdir(united_path):
if united_file.endswith(".csv"):
total += import_to_db(collection, united_path + united_file)
logging.info("\tImported {0} items".format(total))
update_db(collection)
if __name__ == "__main__":
main()
| boazin/anyway | united.py | Python | bsd-3-clause | 15,199 |
__author__ = 'Igor'
| Smart-Green/needle | tests/haystackClient/__init__.py | Python | apache-2.0 | 20 |