code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int32 2 to 1.05M
# Copyright 2012 John Sullivan
# Copyright 2012 Other contributors as noted in the CONTRIBUTERS file
#
# This file is part of Galah.
#
# Galah is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Galah is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Galah. If not, see <http://www.gnu.org/licenses/>.
# The snippet below is derived from code by Matteo Dell'Amico, published under
# the Unlicense.
# See: https://gist.github.com/4451520
# If you wish to use this code in your own project, get it from the link above. DO NOT
# use the code within this file verbatim unless you are prepared to work under
# the restrictions of the AGPLv3, as this file is licensed under it.
from heapq import heapify, heappush, heappop
from collections import namedtuple
PriorityValuePair = namedtuple("PriorityValuePair", ["priority", "value"])
class PriorityDict(dict):
"""Dictionary that can be used as a priority queue.
Keys of the dictionary are items to be put into the queue, and values
are their respective priorities. All dictionary methods work as expected.
The advantage over a standard heapq-based priority queue is
that priorities of items can be efficiently updated (amortized O(1))
using code such as 'thedict[item] = new_priority'.
The 'smallest' method can be used to return the object with lowest
priority, and 'pop_smallest' also removes it.
The 'sorted_iter' method provides a destructive sorted iterator.
"""
def __init__(self, *args, **kwargs):
super(PriorityDict, self).__init__(*args, **kwargs)
self._rebuild_heap()
def _rebuild_heap(self):
self._heap = [(v, k) for k, v in self.iteritems()]
heapify(self._heap)
def smallest(self):
"""
Return the item with the lowest priority as a named tuple
(priority, value).
Raises IndexError if the object is empty.
"""
heap = self._heap
v, k = heap[0]
while k not in self or self[k] != v:
heappop(heap)
v, k = heap[0]
return PriorityValuePair(v, k)
def pop_smallest(self):
"""
Return the item with the lowest priority as a named tuple
(priority, value) and remove it.
Raises IndexError if the object is empty.
"""
heap = self._heap
v, k = heappop(heap)
while k not in self or self[k] != v:
v, k = heappop(heap)
del self[k]
return PriorityValuePair(v, k)
def __setitem__(self, key, val):
# We are not going to remove the previous value from the heap,
# since this would have a cost O(n).
super(PriorityDict, self).__setitem__(key, val)
if len(self._heap) < 2 * len(self):
heappush(self._heap, (val, key))
else:
# When the heap grows larger than 2 * len(self), we rebuild it
# from scratch to avoid wasting too much memory.
self._rebuild_heap()
def setdefault(self, key, val):
if key not in self:
self[key] = val
return val
return self[key]
def update(self, *args, **kwargs):
# Reimplementing dict.update is tricky -- see e.g.
# http://mail.python.org/pipermail/python-ideas/2007-May/000744.html
# We just rebuild the heap from scratch after passing to super.
super(PriorityDict, self).update(*args, **kwargs)
self._rebuild_heap()
def sorted_iter(self):
"""Sorted iterator of the priority dictionary items.
Beware: this will destroy elements as they are returned.
"""
while self:
yield self.pop_smallest()
| tugluck/galah | galah/base/prioritydict.py | Python | agpl-3.0 | 4,196 |
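A brief usage sketch for the PriorityDict snippet above. The task names and priorities are made up, and it assumes Python 2, since the class relies on dict.iteritems():

tasks = PriorityDict({"write_report": 3, "fix_bug": 1, "reply_email": 2})
tasks["fix_bug"] = 0                # re-prioritise an existing key (amortized O(1))
print(tasks.smallest())             # PriorityValuePair(priority=0, value='fix_bug')
print(tasks.pop_smallest())         # same pair, and 'fix_bug' is removed from the dict
print(list(tasks.sorted_iter()))    # drains the remaining items in priority order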
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) Open Solutions Finland 2013.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Delivery date to order line",
"version" : "1.0",
"author" : "Open Solutions Finland",
"description" : """
""",
"website" : "http://www.opensolutions.fi",
"depends" : ["base","product","sale"],
"category" : "Generic Modules",
"init_xml" : [],
"demo_xml" : [],
"data" : [
'sale_order_line_view.xml'
],
'test': [
],
'installable': True,
'active': False,
'certificate': '',
}
| OpenSolutionsFinland/deliver_date | __openerp__.py | Python | agpl-3.0 | 1,459 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrapy', '0003_personvote_timestamp'),
('mps_v2', '0032_auto_20160531_1913'),
]
operations = [
migrations.AddField(
model_name='voting',
name='voting',
field=models.ForeignKey(related_name='stenogram_votings', to='scrapy.Voting', null=True),
),
migrations.AlterUniqueTogether(
name='voting',
unique_together=set([('voting', 'stenogram_topic')]),
),
migrations.RemoveField(
model_name='voting',
name='topic',
),
]
| ManoSeimas/manoseimas.lt | manoseimas/mps_v2/migrations/0033_auto_20160603_1235.py | Python | agpl-3.0 | 745 |
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from datetime import datetime
from jormungandr.interfaces.v1.serializer.base import PbNestedSerializer, EnumField, EnumListField
from jormungandr.interfaces.v1.serializer import pt, jsonschema, base
from jormungandr.interfaces.v1.serializer.fields import LinkSchema, MultiLineStringField
from jormungandr.interfaces.v1.make_links import create_internal_link
from jormungandr.interfaces.v1.serializer.jsonschema.fields import TimeOrDateTimeType
from jormungandr.utils import timestamp_to_str
def _get_links(obj):
display_info = obj.pt_display_informations
uris = display_info.uris
l = [("line", uris.line),
("company", uris.company),
("vehicle_journey", uris.vehicle_journey),
("route", uris.route),
("commercial_mode", uris.commercial_mode),
("physical_mode", uris.physical_mode),
("network", uris.network)]
return [{"type": k, "id": v} for k, v in l if v != ""] + base.make_notes(display_info.notes)
class PassageSerializer(PbNestedSerializer):
route = pt.RouteSerializer()
stop_point = pt.StopPointSerializer()
stop_date_time = pt.StopDateTimeSerializer()
display_informations = pt.VJDisplayInformationSerializer(attr='pt_display_informations')
links = jsonschema.MethodField(schema_type=LinkSchema(many=True))
def get_links(self, obj):
return _get_links(obj)
class DateTimeTypeSerializer(PbNestedSerializer):
date_time = jsonschema.MethodField(schema_type=TimeOrDateTimeType, display_none=True)
additional_informations = pt.AdditionalInformation(attr='additional_informations', display_none=True)
links = pt.PropertiesLinksSerializer(attr="properties")
data_freshness = EnumField(attr="realtime_level", display_none=True)
def get_date_time(self, obj):
__date_time_null_value__ = 2**64 - 1
if obj.time == __date_time_null_value__:
return ""
if obj.HasField('date'):
return timestamp_to_str(obj.date + obj.time)
return datetime.utcfromtimestamp(obj.time).strftime('%H%M%S')
class StopScheduleSerializer(PbNestedSerializer):
stop_point = pt.StopPointSerializer()
route = pt.RouteSerializer()
additional_informations = EnumField(attr="response_status", display_none=True)
display_informations = pt.RouteDisplayInformationSerializer(attr='pt_display_informations')
date_times = DateTimeTypeSerializer(many=True, display_none=True)
links = jsonschema.MethodField(schema_type=LinkSchema(many=True))
def get_links(self, obj):
return _get_links(obj)
class RowSerializer(PbNestedSerializer):
stop_point = pt.StopPointSerializer()
date_times = DateTimeTypeSerializer(many=True, display_none=True)
class HeaderSerializer(PbNestedSerializer):
additional_informations = EnumListField(attr='additional_informations', display_none=True)
display_informations = pt.VJDisplayInformationSerializer(attr='pt_display_informations')
links = jsonschema.MethodField(schema_type=LinkSchema(many=True))
def get_links(self, obj):
return _get_links(obj)
class TableSerializer(PbNestedSerializer):
rows = RowSerializer(many=True, display_none=True)
headers = HeaderSerializer(many=True, display_none=True)
class RouteScheduleSerializer(PbNestedSerializer):
table = TableSerializer()
display_informations = pt.RouteDisplayInformationSerializer(attr='pt_display_informations')
geojson = MultiLineStringField(display_none=False)
additional_informations = EnumField(attr="response_status", display_none=True)
links = jsonschema.MethodField(schema_type=LinkSchema(many=True))
def get_links(self, obj):
return _get_links(obj)
| antoine-de/navitia | source/jormungandr/jormungandr/interfaces/v1/serializer/schedule.py | Python | agpl-3.0 | 4,895 |
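A standalone sketch of the date_time formatting rule implemented in DateTimeTypeSerializer.get_date_time above. The sentinel constant mirrors the one in the code, the timestamps are made up, and the combined date+time format is an assumption about what timestamp_to_str produces:

from datetime import datetime

_DATE_TIME_NULL = 2 ** 64 - 1

def format_date_time(time, date=None):
    if time == _DATE_TIME_NULL:
        return ""                                   # missing value -> empty string
    if date is not None:
        return datetime.utcfromtimestamp(date + time).strftime('%Y%m%dT%H%M%S')
    return datetime.utcfromtimestamp(time).strftime('%H%M%S')  # time of day only

print(format_date_time(8 * 3600))                   # '080000'
print(format_date_time(8 * 3600, 1451606400))       # '20160101T080000' (assumed format)
print(format_date_time(_DATE_TIME_NULL))            # ''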
# Copyright 2017-2019 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import exceptions
from odoo.tests import common
class TestPartnerDeduplicateAcl(common.TransactionCase):
def setUp(self):
super().setUp()
self.partner_1 = self.env["res.partner"].create(
{
"name": "Partner 1",
"email": "[email protected]",
"is_company": True,
"parent_id": False,
}
)
self.partner_2 = self.partner_1.copy()
self.partner_2.write({"name": "Partner 1", "email": "[email protected]"})
self.user = self.env["res.users"].create(
{
"login": "test_crm_deduplicate_acl",
"name": "test_crm_deduplicate_acl",
"email": "[email protected]",
"groups_id": [
(4, self.env.ref("base.group_user").id),
(4, self.env.ref("base.group_partner_manager").id),
],
}
)
self.wizard = (
self.env["base.partner.merge.automatic.wizard"]
.with_user(self.user)
.create({"group_by_name": True})
)
def test_same_email_restriction(self):
self.wizard.action_start_manual_process()
with self.assertRaises(exceptions.UserError):
self.wizard.action_merge()
self.user.groups_id = [
(4, self.env.ref("partner_deduplicate_acl.group_unrestricted").id)
]
# Now there shouldn't be error
self.wizard.action_merge()
| OCA/partner-contact | partner_deduplicate_acl/tests/test_partner_deduplicate_acl.py | Python | agpl-3.0 | 1,662 |
#!/usr/bin/env python2
from titanembeds.app import app, socketio
import subprocess
def init_debug():
import os
from flask import jsonify, request
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1' # Testing oauthlib
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
# Session viewer https://gist.github.com/babldev/502364a3f7c9bafaa6db
def decode_flask_cookie(secret_key, cookie_str):
import hashlib
from itsdangerous import URLSafeTimedSerializer
from flask.sessions import TaggedJSONSerializer
salt = 'cookie-session'
serializer = TaggedJSONSerializer()
signer_kwargs = {
'key_derivation': 'hmac',
'digest_method': hashlib.sha1
}
s = URLSafeTimedSerializer(secret_key, salt=salt, serializer=serializer, signer_kwargs=signer_kwargs)
return s.loads(cookie_str)
@app.route("/session")
def session():
cookie = request.cookies.get('session')
if cookie:
decoded = decode_flask_cookie(app.secret_key, request.cookies.get('session'))
else:
decoded = None
return jsonify(session_cookie=decoded)
@app.route("/github-update", methods=["POST"])
def github_update():
try:
subprocess.Popen("git pull", shell=True).wait()
except OSError:
return "ERROR"
@app.route("/error")
def make_error():
1 / 0
return "OK"
if __name__ == "__main__":
init_debug()
socketio.run(app, host="0.0.0.0",port=3000,debug=True)
| TitanEmbeds/Titan | webapp/run.py | Python | agpl-3.0 | 1,599 |
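A minimal round-trip sketch of the decode_flask_cookie helper above. The secret key and session contents are made up, and it assumes the same Flask and itsdangerous versions the file already imports:

import hashlib
from itsdangerous import URLSafeTimedSerializer
from flask.sessions import TaggedJSONSerializer

secret_key = "not-a-real-secret"
serializer = URLSafeTimedSerializer(
    secret_key, salt='cookie-session', serializer=TaggedJSONSerializer(),
    signer_kwargs={'key_derivation': 'hmac', 'digest_method': hashlib.sha1})
cookie_str = serializer.dumps({'user_id': 42})   # what Flask would store in the session cookie
print(serializer.loads(cookie_str))              # {'user_id': 42}, mirroring decode_flask_cookie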
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^pool/(?P<pk>[0-9a-zA-Z\/]+)/$', views.UserRedirectView.as_view(), name='pool'),
url(r'^pool/(?P<pk>[\d\w_]+)$', views.pool_fix, name='pool_fix'),  # allow letters, digits and underscores only
]
| agusmakmun/Some-Examples-of-Simple-Python-Script | Django/redirect/urls.py | Python | agpl-3.0 | 264 |
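Illustrative request paths (made up) showing what each pattern above matches:

#   /pool/aB3/       -> UserRedirectView  (name='pool'), trailing slash required
#   /pool/report_1   -> pool_fix          (name='pool_fix'), no trailing slash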
# -*- coding: utf-8 -*-
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).
from odoo import models, api
class SendPEC(models.TransientModel):
_name = 'wizard.fatturapa.send.pec'
_description = "Wizard to send multiple e-invoice PEC"
@api.multi
def send_pec(self):
if self.env.context.get('active_ids'):
attachments = self.env['fatturapa.attachment.out'].browse(
self.env.context['active_ids'])
attachments.send_via_pec()
| linkitspa/l10n-italy | l10n_it_fatturapa_pec/wizard/send_pec.py | Python | agpl-3.0 | 503 |
# To ensure that the core entry types are registered
| melissiproject/server | melisi/mlscommon/__init__.py | Python | agpl-3.0 | 54 |
# -*- coding: utf-8 -*-
# © 2016 Elico Corp (https://www.elico-corp.com).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Business Requirement Deliverable - Project',
'category': 'Business Requirements Management',
'summary': 'Create projects and tasks directly from'
' the Business Requirement and Resources lines',
'version': '8.0.4.0.6',
'website': 'https://www.elico-corp.com/',
"author": "Elico Corp, Odoo Community Association (OCA)",
'depends': [
'business_requirement_deliverable',
'project',
],
'data': [
'views/business_view.xml',
'views/project.xml',
'wizard/generate_projects_view.xml',
],
'image': [
'static/description/icon.png',
'static/img/bus_req_project.png'
],
'license': 'AGPL-3',
'installable': False,
}
| sudhir-serpentcs/business-requirement | business_requirement_deliverable_project/__manifest__.py | Python | agpl-3.0 | 882 |
#!/usr/bin/env python3
# -*- coding : utf-8 -*-
def newone():
print("newone test!")
newone()
| kmahyyg/learn_py3 | modules/mymodule1/__init__.py | Python | agpl-3.0 | 98 |
import click
from lc8_download.lc8 import Downloader
@click.command('lc8_download')
@click.argument('scene', type=str, metavar='<scene>')
@click.option('-b', type=str, help="""Bands to be downloaded. Use commas as
delimiter. Example: '-b 2,3,4,BQA'""")
@click.option('--all', is_flag=True, help="Download all bands and metadata")
@click.option('path', '--path', default=None,
type=click.Path(file_okay=False, writable=True),
help="Directory where the files will be saved. Default: ~/landsat/")
@click.option('--metadata', is_flag=True, help="Download scene metadata file.")
def cli(scene, b, path, metadata, all):
lc8 = Downloader(scene)
if all:
bands = list(range(1, 12)) + ['BQA']
metadata = True
else:
bands = []
for band in b.split(','):
if band != 'BQA':
band = int(band)
bands.append(band)
lc8.download(bands, path, metadata)
| ibamacsr/lc8_download | lc8_download/scripts/cli.py | Python | agpl-3.0 | 937 |
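A minimal programmatic sketch using the same Downloader API the CLI above wraps; the scene identifier is a made-up placeholder:

from lc8_download.lc8 import Downloader

scene = Downloader("LC80030172015001LGN00")      # hypothetical Landsat 8 scene id
scene.download([2, 3, 4, 'BQA'], None, True)     # bands, default path (~/landsat/), with metadata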
#!/usr/bin/env python
# -*- encoding: UTF-8 -*-
# This file is part of Addison Arches.
#
# Addison Arches is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Addison Arches is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Addison Arches. If not, see <http://www.gnu.org/licenses/>.
from collections import namedtuple
Action = namedtuple(
"Action", ["name", "rel", "typ", "ref", "method", "parameters", "prompt"])
Parameter = namedtuple("Parameter", ["name", "required", "regex", "values", "tip"])
class View:
def __init__(self, obj, actions={}, *args, **kwargs):
super().__init__(*args, **kwargs)
self.obj = obj
self.type = obj.__class__.__name__
self.fields = obj._fields
self.actions = actions
def rejects(self, action:str):
try:
data = vars(self.obj)
except TypeError:
data = self.obj._asdict()
action = self.actions[action]
missing = [i for i in action.parameters
if i.required and i.name not in data]
missing = missing or [
i for i in action.parameters if i.name in data
and i.values and data[i.name] not in i.values]
missing = missing or [
i for i in action.parameters
if i.name in data and not i.regex.match(str(data[i.name]))]
return missing
| tundish/addisonarches | addisonarches/web/hateoas.py | Python | agpl-3.0 | 1,827 |
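A small sketch of how View.rejects() above validates an object against an Action's parameters; all names, regexes and allowed values are illustrative:

import re
from collections import namedtuple

Thing = namedtuple("Thing", ["name", "size"])
resize = Action(
    name="resize", rel="edit", typ=Thing, ref=None, method="post",
    parameters=[
        Parameter("name", True, re.compile(r"\w+"), [], "A single word."),
        Parameter("size", True, re.compile(r"\d+"), [1, 2, 3], "Pick 1, 2 or 3."),
    ],
    prompt="OK")

view = View(Thing(name="crate", size=5), actions={"resize": resize})
print(view.rejects("resize"))   # [Parameter(name='size', ...)] because 5 is not an allowed value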
# -*- coding: utf-8 -*-
""" Tests for student account views. """
import re
from unittest import skipUnless
from urllib import urlencode
import json
import mock
import ddt
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import mail
from django.test.utils import override_settings
from util.testing import UrlResetMixin
from third_party_auth.tests.testutil import simulate_running_pipeline
from openedx.core.djangoapps.user_api.api import account as account_api
from openedx.core.djangoapps.user_api.api import profile as profile_api
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase, mixed_store_config
)
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import CourseModeFactory
MODULESTORE_CONFIG = mixed_store_config(settings.COMMON_TEST_DATA_ROOT, {}, include_xml=False)
@ddt.ddt
class StudentAccountUpdateTest(UrlResetMixin, TestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = u"heisenberg"
ALTERNATE_USERNAME = u"walt"
OLD_PASSWORD = u"ḅḷüëṡḳÿ"
NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
OLD_EMAIL = u"[email protected]"
NEW_EMAIL = u"[email protected]"
INVALID_ATTEMPTS = 100
INVALID_EMAILS = [
None,
u"",
u"a",
"no_domain",
"no+domain",
"@",
"@domain.com",
"test@no_extension",
# Long email -- subtract the length of the @domain
# except for one character (so we exceed the max length limit)
u"{user}@example.com".format(
user=(u'e' * (account_api.EMAIL_MAX_LENGTH - 11))
)
]
INVALID_KEY = u"123abc"
@mock.patch.dict(settings.FEATURES, {'ENABLE_NEW_DASHBOARD': True})
def setUp(self):
super(StudentAccountUpdateTest, self).setUp("student_account.urls")
# Create/activate a new account
activation_key = account_api.create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
account_api.activate_account(activation_key)
# Login
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertTrue(result)
def test_index(self):
response = self.client.get(reverse('account_index'))
self.assertContains(response, "Student Account")
def test_change_email(self):
response = self._change_email(self.NEW_EMAIL, self.OLD_PASSWORD)
self.assertEquals(response.status_code, 200)
# Verify that the email associated with the account remains unchanged
profile_info = profile_api.profile_info(self.USERNAME)
self.assertEquals(profile_info['email'], self.OLD_EMAIL)
# Check that an email was sent with the activation key
self.assertEqual(len(mail.outbox), 1)
self._assert_email(
mail.outbox[0],
[self.NEW_EMAIL],
u"Email Change Request",
u"There was recently a request to change the email address"
)
# Retrieve the activation key from the email
email_body = mail.outbox[0].body
result = re.search('/email/confirmation/([^ \n]+)', email_body)
self.assertIsNot(result, None)
activation_key = result.group(1)
# Attempt to activate the email
response = self.client.get(reverse('email_change_confirm', kwargs={'key': activation_key}))
self.assertEqual(response.status_code, 200)
# Verify that the email was changed
profile_info = profile_api.profile_info(self.USERNAME)
self.assertEquals(profile_info['email'], self.NEW_EMAIL)
# Verify that notification emails were sent
self.assertEqual(len(mail.outbox), 2)
self._assert_email(
mail.outbox[1],
[self.OLD_EMAIL, self.NEW_EMAIL],
u"Email Change Successful",
u"You successfully changed the email address"
)
def test_email_change_wrong_password(self):
response = self._change_email(self.NEW_EMAIL, "wrong password")
self.assertEqual(response.status_code, 401)
def test_email_change_request_no_user(self):
# Patch account API to raise an internal error when an email change is requested
with mock.patch('student_account.views.account_api.request_email_change') as mock_call:
mock_call.side_effect = account_api.AccountUserNotFound
response = self._change_email(self.NEW_EMAIL, self.OLD_PASSWORD)
self.assertEquals(response.status_code, 400)
def test_email_change_request_email_taken_by_active_account(self):
# Create/activate a second user with the new email
activation_key = account_api.create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
account_api.activate_account(activation_key)
# Request to change the original user's email to the email now used by the second user
response = self._change_email(self.NEW_EMAIL, self.OLD_PASSWORD)
self.assertEquals(response.status_code, 409)
def test_email_change_request_email_taken_by_inactive_account(self):
# Create a second user with the new email, but don't activate them
account_api.create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
# Request to change the original user's email to the email used by the inactive user
response = self._change_email(self.NEW_EMAIL, self.OLD_PASSWORD)
self.assertEquals(response.status_code, 200)
@ddt.data(*INVALID_EMAILS)
def test_email_change_request_email_invalid(self, invalid_email):
# Request to change the user's email to an invalid address
response = self._change_email(invalid_email, self.OLD_PASSWORD)
self.assertEquals(response.status_code, 400)
def test_email_change_confirmation(self):
# Get an email change activation key
activation_key = account_api.request_email_change(self.USERNAME, self.NEW_EMAIL, self.OLD_PASSWORD)
# Follow the link sent in the confirmation email
response = self.client.get(reverse('email_change_confirm', kwargs={'key': activation_key}))
self.assertContains(response, "Email change successful")
# Verify that the email associated with the account has changed
profile_info = profile_api.profile_info(self.USERNAME)
self.assertEquals(profile_info['email'], self.NEW_EMAIL)
def test_email_change_confirmation_invalid_key(self):
# Visit the confirmation page with an invalid key
response = self.client.get(reverse('email_change_confirm', kwargs={'key': self.INVALID_KEY}))
self.assertContains(response, "Something went wrong")
# Verify that the email associated with the account has not changed
profile_info = profile_api.profile_info(self.USERNAME)
self.assertEquals(profile_info['email'], self.OLD_EMAIL)
def test_email_change_confirmation_email_already_exists(self):
# Get an email change activation key
email_activation_key = account_api.request_email_change(self.USERNAME, self.NEW_EMAIL, self.OLD_PASSWORD)
# Create/activate a second user with the new email
account_activation_key = account_api.create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
account_api.activate_account(account_activation_key)
# Follow the link sent to the original user
response = self.client.get(reverse('email_change_confirm', kwargs={'key': email_activation_key}))
self.assertContains(response, "address you wanted to use is already used")
# Verify that the email associated with the original account has not changed
profile_info = profile_api.profile_info(self.USERNAME)
self.assertEquals(profile_info['email'], self.OLD_EMAIL)
def test_email_change_confirmation_internal_error(self):
# Get an email change activation key
activation_key = account_api.request_email_change(self.USERNAME, self.NEW_EMAIL, self.OLD_PASSWORD)
# Patch account API to return an internal error
with mock.patch('student_account.views.account_api.confirm_email_change') as mock_call:
mock_call.side_effect = account_api.AccountInternalError
response = self.client.get(reverse('email_change_confirm', kwargs={'key': activation_key}))
self.assertContains(response, "Something went wrong")
def test_email_change_request_missing_email_param(self):
response = self._change_email(None, self.OLD_PASSWORD)
self.assertEqual(response.status_code, 400)
def test_email_change_request_missing_password_param(self):
response = self._change_email(self.OLD_EMAIL, None)
self.assertEqual(response.status_code, 400)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
def test_password_change(self):
# Request a password change while logged in, simulating
# use of the password reset link from the account page
response = self._change_password()
self.assertEqual(response.status_code, 200)
# Check that an email was sent
self.assertEqual(len(mail.outbox), 1)
# Retrieve the activation link from the email body
email_body = mail.outbox[0].body
result = re.search('(?P<url>https?://[^\s]+)', email_body)
self.assertIsNot(result, None)
activation_link = result.group('url')
# Visit the activation link
response = self.client.get(activation_link)
self.assertEqual(response.status_code, 200)
# Submit a new password and follow the redirect to the success page
response = self.client.post(
activation_link,
# These keys are from the form on the current password reset confirmation page.
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Your password has been set.")
# Log the user out to clear session data
self.client.logout()
# Verify that the new password can be used to log in
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
# Try reusing the activation link to change the password again
response = self.client.post(
activation_link,
{'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "The password reset link was invalid, possibly because the link has already been used.")
self.client.logout()
# Verify that the old password cannot be used to log in
result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
self.assertFalse(result)
# Verify that the new password continues to be valid
result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
self.assertTrue(result)
@ddt.data(True, False)
def test_password_change_logged_out(self, send_email):
# Log the user out
self.client.logout()
# Request a password change while logged out, simulating
# use of the password reset link from the login page
if send_email:
response = self._change_password(email=self.OLD_EMAIL)
self.assertEqual(response.status_code, 200)
else:
# Don't send an email in the POST data, simulating
# its (potentially accidental) omission in the POST
# data sent from the login page
response = self._change_password()
self.assertEqual(response.status_code, 400)
def test_password_change_inactive_user(self):
# Log out the user created during test setup
self.client.logout()
# Create a second user, but do not activate it
account_api.create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
# Send the view the email address tied to the inactive user
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 400)
def test_password_change_no_user(self):
# Log out the user created during test setup
self.client.logout()
# Send the view an email address not tied to any user
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 400)
def test_password_change_rate_limited(self):
# Log out the user created during test setup, to prevent the view from
# selecting the logged-in user's email address over the email provided
# in the POST data
self.client.logout()
# Make many consecutive bad requests in an attempt to trigger the rate limiter
for attempt in xrange(self.INVALID_ATTEMPTS):
self._change_password(email=self.NEW_EMAIL)
response = self._change_password(email=self.NEW_EMAIL)
self.assertEqual(response.status_code, 403)
@ddt.data(
('get', 'account_index', []),
('post', 'email_change_request', []),
('get', 'email_change_confirm', [123])
)
@ddt.unpack
def test_require_login(self, method, url_name, args):
# Access the page while logged out
self.client.logout()
url = reverse(url_name, args=args)
response = getattr(self.client, method)(url, follow=True)
# Should have been redirected to the login page
self.assertEqual(len(response.redirect_chain), 1)
self.assertIn('accounts/login?next=', response.redirect_chain[0][0])
@ddt.data(
('get', 'account_index', []),
('post', 'email_change_request', []),
('get', 'email_change_confirm', [123]),
('post', 'password_change_request', []),
)
@ddt.unpack
def test_require_http_method(self, correct_method, url_name, args):
wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
url = reverse(url_name, args=args)
for method in wrong_methods:
response = getattr(self.client, method)(url)
self.assertEqual(response.status_code, 405)
def _assert_email(self, email, expected_to, expected_subject, expected_body):
"""Check whether an email has the correct properties. """
self.assertEqual(email.to, expected_to)
self.assertIn(expected_subject, email.subject)
self.assertIn(expected_body, email.body)
def _change_email(self, new_email, password):
"""Request to change the user's email. """
data = {}
if new_email is not None:
data['email'] = new_email
if password is not None:
# We can't pass a Unicode object to urlencode, so we encode the Unicode object
data['password'] = password.encode('utf-8')
return self.client.post(path=reverse('email_change_request'), data=data)
def _change_password(self, email=None):
"""Request to change the user's password. """
data = {}
if email:
data['email'] = email
return self.client.post(path=reverse('password_change_request'), data=data)
@ddt.ddt
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
class StudentAccountLoginAndRegistrationTest(ModuleStoreTestCase):
""" Tests for the student account views that update the user's account information. """
USERNAME = "bob"
EMAIL = "[email protected]"
PASSWORD = "password"
@ddt.data(
("account_login", "login"),
("account_register", "register"),
)
@ddt.unpack
def test_login_and_registration_form(self, url_name, initial_mode):
response = self.client.get(reverse(url_name))
expected_data = u"data-initial-mode=\"{mode}\"".format(mode=initial_mode)
self.assertContains(response, expected_data)
@ddt.data("account_login", "account_register")
def test_login_and_registration_form_already_authenticated(self, url_name):
# Create/activate a new account and log in
activation_key = account_api.create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
account_api.activate_account(activation_key)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result)
# Verify that we're redirected to the dashboard
response = self.client.get(reverse(url_name))
self.assertRedirects(response, reverse("dashboard"))
@mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
@ddt.data("account_login", "account_register")
def test_third_party_auth_disabled(self, url_name):
response = self.client.get(reverse(url_name))
self._assert_third_party_auth_data(response, None, [])
@ddt.data(
("account_login", None, None),
("account_register", None, None),
("account_login", "google-oauth2", "Google"),
("account_register", "google-oauth2", "Google"),
("account_login", "facebook", "Facebook"),
("account_register", "facebook", "Facebook"),
)
@ddt.unpack
def test_third_party_auth(self, url_name, current_backend, current_provider):
# Simulate a running pipeline
if current_backend is not None:
pipeline_target = "student_account.views.third_party_auth.pipeline"
with simulate_running_pipeline(pipeline_target, current_backend):
response = self.client.get(reverse(url_name))
# Do NOT simulate a running pipeline
else:
response = self.client.get(reverse(url_name))
# This relies on the THIRD_PARTY_AUTH configuration in the test settings
expected_providers = [
{
"name": "Facebook",
"iconClass": "fa-facebook",
"loginUrl": self._third_party_login_url("facebook", "account_login"),
"registerUrl": self._third_party_login_url("facebook", "account_register")
},
{
"name": "Google",
"iconClass": "fa-google-plus",
"loginUrl": self._third_party_login_url("google-oauth2", "account_login"),
"registerUrl": self._third_party_login_url("google-oauth2", "account_register")
}
]
self._assert_third_party_auth_data(response, current_provider, expected_providers)
@ddt.data([], ["honor"], ["honor", "verified", "audit"], ["professional"])
def test_third_party_auth_course_id_verified(self, modes):
# Create a course with the specified course modes
course = CourseFactory.create()
for slug in modes:
CourseModeFactory.create(
course_id=course.id,
mode_slug=slug,
mode_display_name=slug
)
# Verify that the entry URL for third party auth
# contains the course ID and redirects to the track selection page.
course_modes_choose_url = reverse(
"course_modes_choose",
kwargs={"course_id": unicode(course.id)}
)
expected_providers = [
{
"name": "Facebook",
"iconClass": "fa-facebook",
"loginUrl": self._third_party_login_url(
"facebook", "account_login",
course_id=unicode(course.id),
redirect_url=course_modes_choose_url
),
"registerUrl": self._third_party_login_url(
"facebook", "account_register",
course_id=unicode(course.id),
redirect_url=course_modes_choose_url
)
},
{
"name": "Google",
"iconClass": "fa-google-plus",
"loginUrl": self._third_party_login_url(
"google-oauth2", "account_login",
course_id=unicode(course.id),
redirect_url=course_modes_choose_url
),
"registerUrl": self._third_party_login_url(
"google-oauth2", "account_register",
course_id=unicode(course.id),
redirect_url=course_modes_choose_url
)
}
]
# Verify that the login page contains the correct provider URLs
response = self.client.get(reverse("account_login"), {"course_id": unicode(course.id)})
self._assert_third_party_auth_data(response, None, expected_providers)
def test_third_party_auth_course_id_shopping_cart(self):
# Create a course with a white-label course mode
course = CourseFactory.create()
CourseModeFactory.create(
course_id=course.id,
mode_slug="honor",
mode_display_name="Honor",
min_price=100
)
# Verify that the entry URL for third party auth
# contains the course ID and redirects to the shopping cart
shoppingcart_url = reverse("shoppingcart.views.show_cart")
expected_providers = [
{
"name": "Facebook",
"iconClass": "fa-facebook",
"loginUrl": self._third_party_login_url(
"facebook", "account_login",
course_id=unicode(course.id),
redirect_url=shoppingcart_url
),
"registerUrl": self._third_party_login_url(
"facebook", "account_register",
course_id=unicode(course.id),
redirect_url=shoppingcart_url
)
},
{
"name": "Google",
"iconClass": "fa-google-plus",
"loginUrl": self._third_party_login_url(
"google-oauth2", "account_login",
course_id=unicode(course.id),
redirect_url=shoppingcart_url
),
"registerUrl": self._third_party_login_url(
"google-oauth2", "account_register",
course_id=unicode(course.id),
redirect_url=shoppingcart_url
)
}
]
# Verify that the login page contains the correct provider URLs
response = self.client.get(reverse("account_login"), {"course_id": unicode(course.id)})
self._assert_third_party_auth_data(response, None, expected_providers)
def _assert_third_party_auth_data(self, response, current_provider, providers):
"""Verify that third party auth info is rendered correctly in a DOM data attribute. """
expected_data = u"data-third-party-auth='{auth_info}'".format(
auth_info=json.dumps({
"currentProvider": current_provider,
"providers": providers
})
)
self.assertContains(response, expected_data)
def _third_party_login_url(self, backend_name, auth_entry, course_id=None, redirect_url=None):
"""Construct the login URL to start third party authentication. """
params = [("auth_entry", auth_entry)]
if redirect_url:
params.append(("next", redirect_url))
if course_id:
params.append(("enroll_course_id", course_id))
return u"{url}?{params}".format(
url=reverse("social:begin", kwargs={"backend": backend_name}),
params=urlencode(params)
)
| UQ-UQx/edx-platform_lti | lms/djangoapps/student_account/test/test_views.py | Python | agpl-3.0 | 23,896 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import uuid
from itertools import groupby
from datetime import datetime, timedelta
from werkzeug.urls import url_encode
from odoo import api, fields, models, _
from odoo.exceptions import UserError, AccessError
from odoo.osv import expression
from odoo.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
from odoo.tools.misc import formatLang
from odoo.addons import decimal_precision as dp
class SaleOrder(models.Model):
_name = "sale.order"
_inherit = ['portal.mixin', 'mail.thread', 'mail.activity.mixin']
_description = "Quotation"
_order = 'date_order desc, id desc'
@api.depends('order_line.price_total')
def _amount_all(self):
"""
Compute the total amounts of the SO.
"""
for order in self:
amount_untaxed = amount_tax = 0.0
for line in order.order_line:
amount_untaxed += line.price_subtotal
amount_tax += line.price_tax
order.update({
'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),
'amount_tax': order.pricelist_id.currency_id.round(amount_tax),
'amount_total': amount_untaxed + amount_tax,
})
@api.depends('state', 'order_line.invoice_status')
def _get_invoiced(self):
"""
Compute the invoice status of a SO. Possible statuses:
- no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
invoice. This is also the default value if the conditions for no other status are met.
- to invoice: if any SO line is 'to invoice', the whole SO is 'to invoice'
- invoiced: if all SO lines are invoiced, the SO is invoiced.
- upselling: if all SO lines are invoiced or upselling, the status is upselling.
The invoice_ids are obtained thanks to the invoice lines of the SO lines, and we also search
for possible refunds created directly from existing invoices. This is necessary since such a
refund is not directly linked to the SO.
"""
for order in self:
invoice_ids = order.order_line.mapped('invoice_lines').mapped('invoice_id').filtered(lambda r: r.type in ['out_invoice', 'out_refund'])
# Search for invoices which have been 'cancelled' (filter_refund = 'modify' in
# 'account.invoice.refund')
# use like as origin may contain multiple references (e.g. 'SO01, SO02')
refunds = invoice_ids.search([('origin', 'like', order.name), ('company_id', '=', order.company_id.id)]).filtered(lambda r: r.type in ['out_invoice', 'out_refund'])
invoice_ids |= refunds.filtered(lambda r: order.name in [origin.strip() for origin in r.origin.split(',')])
# Search for refunds as well
refund_ids = self.env['account.invoice'].browse()
if invoice_ids:
for inv in invoice_ids:
refund_ids += refund_ids.search([('type', '=', 'out_refund'), ('origin', '=', inv.number), ('origin', '!=', False), ('journal_id', '=', inv.journal_id.id)])
# Ignore the status of the deposit product
deposit_product_id = self.env['sale.advance.payment.inv']._default_product_id()
line_invoice_status = [line.invoice_status for line in order.order_line if line.product_id != deposit_product_id]
if order.state not in ('sale', 'done'):
invoice_status = 'no'
elif any(invoice_status == 'to invoice' for invoice_status in line_invoice_status):
invoice_status = 'to invoice'
elif all(invoice_status == 'invoiced' for invoice_status in line_invoice_status):
invoice_status = 'invoiced'
elif all(invoice_status in ['invoiced', 'upselling'] for invoice_status in line_invoice_status):
invoice_status = 'upselling'
else:
invoice_status = 'no'
order.update({
'invoice_count': len(set(invoice_ids.ids + refund_ids.ids)),
'invoice_ids': invoice_ids.ids + refund_ids.ids,
'invoice_status': invoice_status
})
@api.model
def get_empty_list_help(self, help):
self = self.with_context(
empty_list_help_document_name=_("sale order"),
)
return super(SaleOrder, self).get_empty_list_help(help)
def _get_default_access_token(self):
return str(uuid.uuid4())
@api.model
def _default_note(self):
return self.env['ir.config_parameter'].sudo().get_param('sale.use_sale_note') and self.env.user.company_id.sale_note or ''
@api.model
def _get_default_team(self):
return self.env['crm.team']._get_default_team_id()
@api.onchange('fiscal_position_id')
def _compute_tax_id(self):
"""
Trigger the recompute of the taxes if the fiscal position is changed on the SO.
"""
for order in self:
order.order_line._compute_tax_id()
name = fields.Char(string='Order Reference', required=True, copy=False, readonly=True, states={'draft': [('readonly', False)]}, index=True, default=lambda self: _('New'))
origin = fields.Char(string='Source Document', help="Reference of the document that generated this sales order request.")
client_order_ref = fields.Char(string='Customer Reference', copy=False)
access_token = fields.Char(
'Security Token', copy=False,
default=_get_default_access_token)
state = fields.Selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('sale', 'Sales Order'),
('done', 'Locked'),
('cancel', 'Cancelled'),
], string='Status', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')
date_order = fields.Datetime(string='Order Date', required=True, readonly=True, index=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=False, default=fields.Datetime.now)
validity_date = fields.Date(string='Quote Validity', readonly=True, copy=False, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="Validity date of the quotation, after this date, the customer won't be able to validate the quotation online.")
is_expired = fields.Boolean(compute='_compute_is_expired', string="Is expired")
create_date = fields.Datetime(string='Creation Date', readonly=True, index=True, help="Date on which sales order is created.")
confirmation_date = fields.Datetime(string='Confirmation Date', readonly=True, index=True, help="Date on which the sales order is confirmed.", oldname="date_confirm", copy=False)
user_id = fields.Many2one('res.users', string='Salesperson', index=True, track_visibility='onchange', default=lambda self: self.env.user)
partner_id = fields.Many2one('res.partner', string='Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, index=True, track_visibility='always')
partner_invoice_id = fields.Many2one('res.partner', string='Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)], 'sale': [('readonly', False)]}, help="Invoice address for current sales order.")
partner_shipping_id = fields.Many2one('res.partner', string='Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)], 'sale': [('readonly', False)]}, help="Delivery address for current sales order.")
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order.")
currency_id = fields.Many2one("res.currency", related='pricelist_id.currency_id', string="Currency", readonly=True, required=True)
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order.", copy=False, oldname='project_id')
order_line = fields.One2many('sale.order.line', 'order_id', string='Order Lines', states={'cancel': [('readonly', True)], 'done': [('readonly', True)]}, copy=True, auto_join=True)
invoice_count = fields.Integer(string='Invoice Count', compute='_get_invoiced', readonly=True)
invoice_ids = fields.Many2many("account.invoice", string='Invoices', compute="_get_invoiced", readonly=True, copy=False)
invoice_status = fields.Selection([
('upselling', 'Upselling Opportunity'),
('invoiced', 'Fully Invoiced'),
('to invoice', 'To Invoice'),
('no', 'Nothing to Invoice')
], string='Invoice Status', compute='_get_invoiced', store=True, readonly=True)
note = fields.Text('Terms and conditions', default=_default_note)
amount_untaxed = fields.Monetary(string='Untaxed Amount', store=True, readonly=True, compute='_amount_all', track_visibility='onchange')
amount_tax = fields.Monetary(string='Taxes', store=True, readonly=True, compute='_amount_all')
amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_amount_all', track_visibility='always')
payment_term_id = fields.Many2one('account.payment.term', string='Payment Terms', oldname='payment_term')
fiscal_position_id = fields.Many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position')
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env['res.company']._company_default_get('sale.order'))
team_id = fields.Many2one('crm.team', 'Sales Channel', change_default=True, default=_get_default_team, oldname='section_id')
product_id = fields.Many2one('product.product', related='order_line.product_id', string='Product')
def _compute_portal_url(self):
super(SaleOrder, self)._compute_portal_url()
for order in self:
order.portal_url = '/my/orders/%s' % (order.id)
def _compute_is_expired(self):
now = datetime.now()
for order in self:
if order.validity_date and fields.Datetime.from_string(order.validity_date) < now:
order.is_expired = True
else:
order.is_expired = False
@api.model
def _get_customer_lead(self, product_tmpl_id):
return False
@api.multi
def unlink(self):
for order in self:
if order.state not in ('draft', 'cancel'):
raise UserError(_('You can not delete a sent quotation or a sales order! Try to cancel it before.'))
return super(SaleOrder, self).unlink()
@api.multi
def _track_subtype(self, init_values):
self.ensure_one()
if 'state' in init_values and self.state == 'sale':
return 'sale.mt_order_confirmed'
elif 'state' in init_values and self.state == 'sent':
return 'sale.mt_order_sent'
return super(SaleOrder, self)._track_subtype(init_values)
@api.multi
@api.onchange('partner_shipping_id', 'partner_id')
def onchange_partner_shipping_id(self):
"""
Trigger the change of fiscal position when the shipping address is modified.
"""
self.fiscal_position_id = self.env['account.fiscal.position'].get_fiscal_position(self.partner_id.id, self.partner_shipping_id.id)
return {}
@api.multi
@api.onchange('partner_id')
def onchange_partner_id(self):
"""
Update the following fields when the partner is changed:
- Pricelist
- Payment terms
- Invoice address
- Delivery address
"""
if not self.partner_id:
self.update({
'partner_invoice_id': False,
'partner_shipping_id': False,
'payment_term_id': False,
'fiscal_position_id': False,
})
return
addr = self.partner_id.address_get(['delivery', 'invoice'])
values = {
'pricelist_id': self.partner_id.property_product_pricelist and self.partner_id.property_product_pricelist.id or False,
'payment_term_id': self.partner_id.property_payment_term_id and self.partner_id.property_payment_term_id.id or False,
'partner_invoice_id': addr['invoice'],
'partner_shipping_id': addr['delivery'],
'user_id': self.partner_id.user_id.id or self.env.uid
}
if self.env['ir.config_parameter'].sudo().get_param('sale.use_sale_note') and self.env.user.company_id.sale_note:
values['note'] = self.with_context(lang=self.partner_id.lang).env.user.company_id.sale_note
if self.partner_id.team_id:
values['team_id'] = self.partner_id.team_id.id
self.update(values)
@api.onchange('partner_id')
def onchange_partner_id_warning(self):
if not self.partner_id:
return
warning = {}
title = False
message = False
partner = self.partner_id
# If partner has no warning, check its company
if partner.sale_warn == 'no-message' and partner.parent_id:
partner = partner.parent_id
if partner.sale_warn and partner.sale_warn != 'no-message':
# Block if partner only has warning but parent company is blocked
if partner.sale_warn != 'block' and partner.parent_id and partner.parent_id.sale_warn == 'block':
partner = partner.parent_id
title = ("Warning for %s") % partner.name
message = partner.sale_warn_msg
warning = {
'title': title,
'message': message,
}
if partner.sale_warn == 'block':
self.update({'partner_id': False, 'partner_invoice_id': False, 'partner_shipping_id': False, 'pricelist_id': False})
return {'warning': warning}
if warning:
return {'warning': warning}
@api.model
def create(self, vals):
if vals.get('name', _('New')) == _('New'):
if 'company_id' in vals:
vals['name'] = self.env['ir.sequence'].with_context(force_company=vals['company_id']).next_by_code('sale.order') or _('New')
else:
vals['name'] = self.env['ir.sequence'].next_by_code('sale.order') or _('New')
# Make sure 'partner_invoice_id', 'partner_shipping_id' and 'pricelist_id' are defined
if any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):
partner = self.env['res.partner'].browse(vals.get('partner_id'))
addr = partner.address_get(['delivery', 'invoice'])
vals['partner_invoice_id'] = vals.setdefault('partner_invoice_id', addr['invoice'])
vals['partner_shipping_id'] = vals.setdefault('partner_shipping_id', addr['delivery'])
vals['pricelist_id'] = vals.setdefault('pricelist_id', partner.property_product_pricelist and partner.property_product_pricelist.id)
result = super(SaleOrder, self).create(vals)
return result
@api.multi
def _write(self, values):
""" Override of private write method in order to generate activities
based in the invoice status. As the invoice status is a computed field
triggered notably when its lines and linked invoice status changes the
flow does not necessarily goes through write if the action was not done
on the SO itself. We hence override the _write to catch the computation
of invoice_status field. """
if self.env.context.get('mail_activity_automation_skip'):
return super(SaleOrder, self)._write(values)
res = super(SaleOrder, self)._write(values)
if 'invoice_status' in values:
self.activity_unlink(['sale.mail_act_sale_upsell'])
if values['invoice_status'] == 'upselling':
for order in self:
order.activity_schedule(
'sale.mail_act_sale_upsell', fields.Date.today(),
user_id=order.user_id.id,
note=_("Upsell <a href='#' data-oe-model='%s' data-oe-id='%d'>%s</a> for customer <a href='#' data-oe-model='%s' data-oe-id='%s'>%s</a>") % (
order._name, order.id, order.name,
order.partner_id._name, order.partner_id.id, order.partner_id.display_name))
return res
@api.multi
def copy_data(self, default=None):
if default is None:
default = {}
if 'order_line' not in default:
default['order_line'] = [(0, 0, line.copy_data()[0]) for line in self.order_line.filtered(lambda l: not l.is_downpayment)]
return super(SaleOrder, self).copy_data(default)
@api.multi
def name_get(self):
if self._context.get('sale_show_partner_name'):
res = []
for order in self:
name = order.name
if order.partner_id.name:
name = '%s - %s' % (name, order.partner_id.name)
res.append((order.id, name))
return res
return super(SaleOrder, self).name_get()
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
if self._context.get('sale_show_partner_name'):
if operator in ('ilike', 'like', '=', '=like', '=ilike'):
domain = expression.AND([
args or [],
['|', ('name', operator, name), ('partner_id.name', operator, name)]
])
return self.search(domain, limit=limit).name_get()
return super(SaleOrder, self).name_search(name, args, operator, limit)
@api.model_cr_context
def _init_column(self, column_name):
""" Initialize the value of the given column for existing rows.
Overridden here because we need to generate different access tokens
and by default _init_column calls the default method once and applies
it for every record.
"""
if column_name != 'access_token':
super(SaleOrder, self)._init_column(column_name)
else:
query = """UPDATE %(table_name)s
SET %(column_name)s = md5(md5(random()::varchar || id::varchar) || clock_timestamp()::varchar)::uuid::varchar
WHERE %(column_name)s IS NULL
""" % {'table_name': self._table, 'column_name': column_name}
self.env.cr.execute(query)
def _generate_access_token(self):
for order in self:
order.access_token = self._get_default_access_token()
@api.multi
def _prepare_invoice(self):
"""
Prepare the dict of values to create the new invoice for a sales order. This method may be
overridden to implement custom invoice generation (making sure to call super() to establish
a clean extension chain).
"""
self.ensure_one()
journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']
if not journal_id:
raise UserError(_('Please define an accounting sales journal for this company.'))
invoice_vals = {
'name': self.client_order_ref or '',
'origin': self.name,
'type': 'out_invoice',
'account_id': self.partner_invoice_id.property_account_receivable_id.id,
'partner_id': self.partner_invoice_id.id,
'partner_shipping_id': self.partner_shipping_id.id,
'journal_id': journal_id,
'currency_id': self.pricelist_id.currency_id.id,
'comment': self.note,
'payment_term_id': self.payment_term_id.id,
'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,
'company_id': self.company_id.id,
'user_id': self.user_id and self.user_id.id,
'team_id': self.team_id.id
}
return invoice_vals
@api.multi
def print_quotation(self):
self.filtered(lambda s: s.state == 'draft').write({'state': 'sent'})
return self.env.ref('sale.action_report_saleorder').report_action(self)
@api.multi
def action_view_invoice(self):
invoices = self.mapped('invoice_ids')
action = self.env.ref('account.action_invoice_tree1').read()[0]
if len(invoices) > 1:
action['domain'] = [('id', 'in', invoices.ids)]
elif len(invoices) == 1:
action['views'] = [(self.env.ref('account.invoice_form').id, 'form')]
action['res_id'] = invoices.ids[0]
else:
action = {'type': 'ir.actions.act_window_close'}
return action
@api.multi
def action_invoice_create(self, grouped=False, final=False):
"""
Create the invoice associated to the SO.
:param grouped: if True, invoices are grouped by SO id. If False, invoices are grouped by
(partner_invoice_id, currency)
:param final: if True, refunds will be generated if necessary
:returns: list of created invoices
"""
inv_obj = self.env['account.invoice']
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
invoices = {}
references = {}
for order in self:
group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)
for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):
if float_is_zero(line.qty_to_invoice, precision_digits=precision):
continue
if group_key not in invoices:
inv_data = order._prepare_invoice()
invoice = inv_obj.create(inv_data)
references[invoice] = order
invoices[group_key] = invoice
elif group_key in invoices:
vals = {}
if order.name not in invoices[group_key].origin.split(', '):
vals['origin'] = invoices[group_key].origin + ', ' + order.name
if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(', ') and order.client_order_ref != invoices[group_key].name:
vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref
invoices[group_key].write(vals)
if line.qty_to_invoice > 0:
line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)
elif line.qty_to_invoice < 0 and final:
line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)
if references.get(invoices.get(group_key)):
if order not in references[invoices[group_key]]:
references[invoices[group_key]] |= order
if not invoices:
raise UserError(_('There is no invoiceable line.'))
for invoice in invoices.values():
if not invoice.invoice_line_ids:
raise UserError(_('There is no invoiceable line.'))
# If invoice is negative, do a refund invoice instead
if invoice.amount_untaxed < 0:
invoice.type = 'out_refund'
for line in invoice.invoice_line_ids:
line.quantity = -line.quantity
# Use additional field helper function (for account extensions)
for line in invoice.invoice_line_ids:
line._set_additional_fields(invoice)
# Necessary to force computation of taxes. In account_invoice, they are triggered
# by onchanges, which are not triggered when doing a create.
invoice.compute_taxes()
invoice.message_post_with_view('mail.message_origin_link',
values={'self': invoice, 'origin': references[invoice]},
subtype_id=self.env.ref('mail.mt_note').id)
return [inv.id for inv in invoices.values()]
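    # Illustrative usage (added comment, not in the original source): invoicing a batch of
    # orders grouped per order instead of per (partner_invoice_id, currency). Variable
    # names below are hypothetical.
    #
    #     orders = self.env['sale.order'].search([('invoice_status', '=', 'to invoice')])
    #     invoice_ids = orders.action_invoice_create(grouped=True)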
@api.multi
def action_draft(self):
orders = self.filtered(lambda s: s.state in ['cancel', 'sent'])
return orders.write({
'state': 'draft',
})
@api.multi
def action_cancel(self):
return self.write({'state': 'cancel'})
@api.multi
def action_quotation_send(self):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
self.ensure_one()
ir_model_data = self.env['ir.model.data']
try:
template_id = ir_model_data.get_object_reference('sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = {
'default_model': 'sale.order',
'default_res_id': self.ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True,
'custom_layout': "mail.mail_notification_borders",
'proforma': self.env.context.get('proforma', False),
'force_email': True
}
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
@api.multi
@api.returns('self', lambda value: value.id)
def message_post(self, **kwargs):
if self.env.context.get('mark_so_as_sent'):
self.filtered(lambda o: o.state == 'draft').with_context(tracking_disable=True).write({'state': 'sent'})
return super(SaleOrder, self.with_context(mail_post_autofollow=True)).message_post(**kwargs)
@api.multi
def force_quotation_send(self):
for order in self:
email_act = order.action_quotation_send()
if email_act and email_act.get('context'):
email_ctx = email_act['context']
email_ctx.update(default_email_from=order.company_id.email)
order.with_context(email_ctx).message_post_with_template(email_ctx.get('default_template_id'))
return True
@api.multi
def action_done(self):
return self.write({'state': 'done'})
@api.multi
def action_unlock(self):
self.write({'state': 'sale'})
@api.multi
def _action_confirm(self):
for order in self.filtered(lambda order: order.partner_id not in order.message_partner_ids):
order.message_subscribe([order.partner_id.id])
self.write({
'state': 'sale',
'confirmation_date': fields.Datetime.now()
})
if self.env.context.get('send_email'):
self.force_quotation_send()
# create an analytic account if at least an expense product
if any([expense_policy != 'no' for expense_policy in self.order_line.mapped('product_id.expense_policy')]):
if not self.analytic_account_id:
self._create_analytic_account()
return True
@api.multi
def action_confirm(self):
self._action_confirm()
if self.env['ir.config_parameter'].sudo().get_param('sale.auto_done_setting'):
self.action_done()
return True
@api.multi
def _create_analytic_account(self, prefix=None):
for order in self:
name = order.name
if prefix:
name = prefix + ": " + order.name
analytic = self.env['account.analytic.account'].create({
'name': name,
'code': order.client_order_ref,
'company_id': order.company_id.id,
'partner_id': order.partner_id.id
})
order.analytic_account_id = analytic
@api.multi
def order_lines_layouted(self):
"""
        Returns this order's lines classified by sale_layout_category and separated into
        pages according to the category pagebreaks. Used to render the report.
"""
self.ensure_one()
report_pages = [[]]
for category, lines in groupby(self.order_line, lambda l: l.layout_category_id):
# If last added category induced a pagebreak, this one will be on a new page
if report_pages[-1] and report_pages[-1][-1]['pagebreak']:
report_pages.append([])
# Append category to current report page
report_pages[-1].append({
'name': category and category.name or _('Uncategorized'),
'subtotal': category and category.subtotal,
'pagebreak': category and category.pagebreak,
'lines': list(lines)
})
return report_pages
@api.multi
def _get_tax_amount_by_group(self):
self.ensure_one()
res = {}
for line in self.order_line:
price_reduce = line.price_unit * (1.0 - line.discount / 100.0)
taxes = line.tax_id.compute_all(price_reduce, quantity=line.product_uom_qty, product=line.product_id, partner=self.partner_shipping_id)['taxes']
for tax in line.tax_id:
group = tax.tax_group_id
res.setdefault(group, {'amount': 0.0, 'base': 0.0})
for t in taxes:
if t['id'] == tax.id or t['id'] in tax.children_tax_ids.ids:
res[group]['amount'] += t['amount']
res[group]['base'] += t['base']
res = sorted(res.items(), key=lambda l: l[0].sequence)
res = [(l[0].name, l[1]['amount'], l[1]['base'], len(res)) for l in res]
return res
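    # For reference (added comment, not in the original source): the method above returns a
    # list of tuples consumed by the report templates, e.g.
    #     [('VAT 20%', 40.0, 200.0, 1)]
    # i.e. (tax group name, tax amount, base amount, number of tax groups).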
@api.multi
def get_access_action(self, access_uid=None):
""" Instead of the classic form view, redirect to the online order for
portal users or if force_website=True in the context. """
# TDE note: read access on sales order to portal users granted to followed sales orders
self.ensure_one()
if self.state != 'cancel' and (self.state != 'draft' or self.env.context.get('mark_so_as_sent')):
user, record = self.env.user, self
if access_uid:
user = self.env['res.users'].sudo().browse(access_uid)
record = self.sudo(user)
if user.share or self.env.context.get('force_website'):
try:
record.check_access_rule('read')
except AccessError:
if self.env.context.get('force_website'):
return {
'type': 'ir.actions.act_url',
'url': '/my/orders/%s' % self.id,
'target': 'self',
'res_id': self.id,
}
else:
pass
else:
return {
'type': 'ir.actions.act_url',
'url': '/my/orders/%s?access_token=%s' % (self.id, self.access_token),
'target': 'self',
'res_id': self.id,
}
return super(SaleOrder, self).get_access_action(access_uid)
def get_mail_url(self):
return self.get_share_url()
def get_portal_confirmation_action(self):
return self.env['ir.config_parameter'].sudo().get_param('sale.sale_portal_confirmation_options', default='none')
@api.multi
    def _notify_get_groups(self, message, groups):
        """ Give the access button to users and portal customers, as the portal is integrated
            in sale. The customer and portal groups probably have no right to see
            the document, so they do not get the access button. """
groups = super(SaleOrder, self)._notify_get_groups(message, groups)
self.ensure_one()
if self.state not in ('draft', 'cancel'):
for group_name, group_method, group_data in groups:
if group_name in ('customer', 'portal'):
continue
group_data['has_button_access'] = True
return groups
class SaleOrderLine(models.Model):
_name = 'sale.order.line'
_description = 'Sales Order Line'
_order = 'order_id, layout_category_id, sequence, id'
@api.depends('state', 'product_uom_qty', 'qty_delivered', 'qty_to_invoice', 'qty_invoiced')
def _compute_invoice_status(self):
"""
Compute the invoice status of a SO line. Possible statuses:
- no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
          invoice. This is also the default value if the conditions of no other status are met.
- to invoice: we refer to the quantity to invoice of the line. Refer to method
`_get_to_invoice_qty()` for more information on how this quantity is calculated.
- upselling: this is possible only for a product invoiced on ordered quantities for which
          we delivered more than expected. This could arise if, for example, a project took more
time than expected but we decided not to invoice the extra cost to the client. This
          occurs only in the state 'sale', so that when a SO is set to done, the upselling opportunity
is removed from the list.
- invoiced: the quantity invoiced is larger or equal to the quantity ordered.
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if line.state not in ('sale', 'done'):
line.invoice_status = 'no'
elif not float_is_zero(line.qty_to_invoice, precision_digits=precision):
line.invoice_status = 'to invoice'
elif line.state == 'sale' and line.product_id.invoice_policy == 'order' and\
float_compare(line.qty_delivered, line.product_uom_qty, precision_digits=precision) == 1:
line.invoice_status = 'upselling'
elif float_compare(line.qty_invoiced, line.product_uom_qty, precision_digits=precision) >= 0:
line.invoice_status = 'invoiced'
else:
line.invoice_status = 'no'
@api.depends('product_uom_qty', 'discount', 'price_unit', 'tax_id')
def _compute_amount(self):
"""
Compute the amounts of the SO line.
"""
for line in self:
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_shipping_id)
line.update({
'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),
'price_total': taxes['total_included'],
'price_subtotal': taxes['total_excluded'],
})
@api.depends('product_id', 'order_id.state', 'qty_invoiced', 'qty_delivered')
def _compute_product_updatable(self):
for line in self:
if line.state in ['done', 'cancel'] or (line.state == 'sale' and (line.qty_invoiced > 0 or line.qty_delivered > 0)):
line.product_updatable = False
else:
line.product_updatable = True
@api.depends('qty_invoiced', 'qty_delivered', 'product_uom_qty', 'order_id.state')
def _get_to_invoice_qty(self):
"""
Compute the quantity to invoice. If the invoice policy is order, the quantity to invoice is
calculated from the ordered quantity. Otherwise, the quantity delivered is used.
"""
for line in self:
if line.order_id.state in ['sale', 'done']:
if line.product_id.invoice_policy == 'order':
line.qty_to_invoice = line.product_uom_qty - line.qty_invoiced
else:
line.qty_to_invoice = line.qty_delivered - line.qty_invoiced
else:
line.qty_to_invoice = 0
@api.depends('invoice_lines.invoice_id.state', 'invoice_lines.quantity')
def _get_invoice_qty(self):
"""
        Compute the quantity invoiced. In case of a refund, the quantity invoiced is decreased.
        Note that this is the case only if the refund is generated from the SO, and that is
        intentional: if a refund automatically decreased the invoiced quantity, there would be a
        risk of re-invoicing it automatically, which may not be wanted at all. That's why the
        refund has to be created from the SO.
"""
for line in self:
qty_invoiced = 0.0
for invoice_line in line.invoice_lines:
if invoice_line.invoice_id.state != 'cancel':
if invoice_line.invoice_id.type == 'out_invoice':
qty_invoiced += invoice_line.uom_id._compute_quantity(invoice_line.quantity, line.product_uom)
elif invoice_line.invoice_id.type == 'out_refund':
qty_invoiced -= invoice_line.uom_id._compute_quantity(invoice_line.quantity, line.product_uom)
line.qty_invoiced = qty_invoiced
@api.depends('price_unit', 'discount')
def _get_price_reduce(self):
for line in self:
line.price_reduce = line.price_unit * (1.0 - line.discount / 100.0)
@api.depends('price_total', 'product_uom_qty')
def _get_price_reduce_tax(self):
for line in self:
line.price_reduce_taxinc = line.price_total / line.product_uom_qty if line.product_uom_qty else 0.0
@api.depends('price_subtotal', 'product_uom_qty')
def _get_price_reduce_notax(self):
for line in self:
line.price_reduce_taxexcl = line.price_subtotal / line.product_uom_qty if line.product_uom_qty else 0.0
@api.multi
def _compute_tax_id(self):
for line in self:
fpos = line.order_id.fiscal_position_id or line.order_id.partner_id.property_account_position_id
# If company_id is set, always filter taxes by the company
taxes = line.product_id.taxes_id.filtered(lambda r: not line.company_id or r.company_id == line.company_id)
line.tax_id = fpos.map_tax(taxes, line.product_id, line.order_id.partner_shipping_id) if fpos else taxes
@api.model
def _get_purchase_price(self, pricelist, product, product_uom, date):
return {}
@api.model
def _prepare_add_missing_fields(self, values):
""" Deduce missing required fields from the onchange """
res = {}
onchange_fields = ['name', 'price_unit', 'product_uom', 'tax_id']
if values.get('order_id') and values.get('product_id') and any(f not in values for f in onchange_fields):
line = self.new(values)
line.product_id_change()
for field in onchange_fields:
if field not in values:
res[field] = line._fields[field].convert_to_write(line[field], line)
return res
@api.model
def create(self, values):
values.update(self._prepare_add_missing_fields(values))
line = super(SaleOrderLine, self).create(values)
if line.order_id.state == 'sale':
msg = _("Extra line with %s ") % (line.product_id.display_name,)
line.order_id.message_post(body=msg)
# create an analytic account if at least an expense product
            if line.product_id.expense_policy != 'no' and not line.order_id.analytic_account_id:
                line.order_id._create_analytic_account()
return line
def _update_line_quantity(self, values):
orders = self.mapped('order_id')
for order in orders:
order_lines = self.filtered(lambda x: x.order_id == order)
msg = "<b>The ordered quantity has been updated.</b><ul>"
for line in order_lines:
msg += "<li> %s:" % (line.product_id.display_name,)
msg += "<br/>" + _("Ordered Quantity") + ": %s -> %s <br/>" % (
line.product_uom_qty, float(values['product_uom_qty']),)
if line.product_id.type in ('consu', 'product'):
msg += _("Delivered Quantity") + ": %s <br/>" % (line.qty_delivered,)
msg += _("Invoiced Quantity") + ": %s <br/>" % (line.qty_invoiced,)
msg += "</ul>"
order.message_post(body=msg)
@api.multi
def write(self, values):
if 'product_uom_qty' in values:
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
self.filtered(
lambda r: r.state == 'sale' and float_compare(r.product_uom_qty, values['product_uom_qty'], precision_digits=precision) != 0)._update_line_quantity(values)
# Prevent writing on a locked SO.
protected_fields = self._get_protected_fields()
if 'done' in self.mapped('order_id.state') and any(f in values.keys() for f in protected_fields):
protected_fields_modified = list(set(protected_fields) & set(values.keys()))
fields = self.env['ir.model.fields'].search([
('name', 'in', protected_fields_modified), ('model', '=', self._name)
])
raise UserError(
_('It is forbidden to modify the following fields in a locked order:\n%s')
% '\n'.join(fields.mapped('field_description'))
)
result = super(SaleOrderLine, self).write(values)
return result
order_id = fields.Many2one('sale.order', string='Order Reference', required=True, ondelete='cascade', index=True, copy=False)
name = fields.Text(string='Description', required=True)
sequence = fields.Integer(string='Sequence', default=10)
invoice_lines = fields.Many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_line_id', string='Invoice Lines', copy=False)
invoice_status = fields.Selection([
('upselling', 'Upselling Opportunity'),
('invoiced', 'Fully Invoiced'),
('to invoice', 'To Invoice'),
('no', 'Nothing to Invoice')
], string='Invoice Status', compute='_compute_invoice_status', store=True, readonly=True, default='no')
price_unit = fields.Float('Unit Price', required=True, digits=dp.get_precision('Product Price'), default=0.0)
price_subtotal = fields.Monetary(compute='_compute_amount', string='Subtotal', readonly=True, store=True)
price_tax = fields.Float(compute='_compute_amount', string='Total Tax', readonly=True, store=True)
price_total = fields.Monetary(compute='_compute_amount', string='Total', readonly=True, store=True)
price_reduce = fields.Float(compute='_get_price_reduce', string='Price Reduce', digits=dp.get_precision('Product Price'), readonly=True, store=True)
tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', ('active', '=', False), ('active', '=', True)])
price_reduce_taxinc = fields.Monetary(compute='_get_price_reduce_tax', string='Price Reduce Tax inc', readonly=True, store=True)
price_reduce_taxexcl = fields.Monetary(compute='_get_price_reduce_notax', string='Price Reduce Tax excl', readonly=True, store=True)
discount = fields.Float(string='Discount (%)', digits=dp.get_precision('Discount'), default=0.0)
product_id = fields.Many2one('product.product', string='Product', domain=[('sale_ok', '=', True)], change_default=True, ondelete='restrict', required=True)
product_updatable = fields.Boolean(compute='_compute_product_updatable', string='Can Edit Product', readonly=True, default=True)
product_uom_qty = fields.Float(string='Ordered Quantity', digits=dp.get_precision('Product Unit of Measure'), required=True, default=1.0)
product_uom = fields.Many2one('uom.uom', string='Unit of Measure', required=True)
# Non-stored related field to allow portal user to see the image of the product he has ordered
product_image = fields.Binary('Product Image', related="product_id.image", store=False)
qty_delivered_method = fields.Selection([
('manual', 'Manual'),
('analytic', 'Analytic From Expenses')
], string="Method to update delivered qty", compute='_compute_qty_delivered_method', compute_sudo=True, store=True, readonly=True,
        help="According to the product configuration, the delivered quantity can be automatically computed by one of these mechanisms:\n"
             " - Manual: the quantity is set manually on the line\n"
             " - Analytic from expenses: the quantity is the sum of quantities from posted expenses\n"
             " - Timesheet: the quantity is the sum of hours recorded on tasks linked to this sale line\n"
             " - Stock Moves: the quantity comes from confirmed pickings\n")
qty_delivered = fields.Float('Delivered', copy=False, compute='_compute_qty_delivered', inverse='_inverse_qty_delivered', compute_sudo=True, store=True, digits=dp.get_precision('Product Unit of Measure'), default=0.0)
qty_delivered_manual = fields.Float('Delivered Manually', copy=False, digits=dp.get_precision('Product Unit of Measure'), default=0.0)
qty_to_invoice = fields.Float(
compute='_get_to_invoice_qty', string='To Invoice', store=True, readonly=True,
digits=dp.get_precision('Product Unit of Measure'))
qty_invoiced = fields.Float(
compute='_get_invoice_qty', string='Invoiced', store=True, readonly=True,
digits=dp.get_precision('Product Unit of Measure'))
salesman_id = fields.Many2one(related='order_id.user_id', store=True, string='Salesperson', readonly=True)
currency_id = fields.Many2one(related='order_id.currency_id', depends=['order_id'], store=True, string='Currency', readonly=True)
company_id = fields.Many2one(related='order_id.company_id', string='Company', store=True, readonly=True)
order_partner_id = fields.Many2one(related='order_id.partner_id', store=True, string='Customer')
analytic_tag_ids = fields.Many2many('account.analytic.tag', string='Analytic Tags')
analytic_line_ids = fields.One2many('account.analytic.line', 'so_line', string="Analytic lines")
    is_expense = fields.Boolean('Is expense', help="Is true if the sales order line comes from an expense or a vendor bill")
is_downpayment = fields.Boolean(
string="Is a down payment", help="Down payments are made when creating invoices from a sales order."
" They are not copied when duplicating a sales order.")
state = fields.Selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('sale', 'Sales Order'),
('done', 'Done'),
('cancel', 'Cancelled'),
], related='order_id.state', string='Order Status', readonly=True, copy=False, store=True, default='draft')
customer_lead = fields.Float(
'Delivery Lead Time', required=True, default=0.0,
help="Number of days between the order confirmation and the shipping of the products to the customer", oldname="delay")
layout_category_id = fields.Many2one('sale.layout_category', string='Section')
layout_category_sequence = fields.Integer(string='Layout Sequence')
# TODO: remove layout_category_sequence in master or make it work properly
@api.multi
@api.depends('state', 'is_expense')
    def _compute_qty_delivered_method(self):
        """ The sale module computes the delivered qty for products [('type', 'in', ['consu']), ('service_type', '=', 'manual')]
            - consu + expense_policy : analytic (sum of analytic unit_amount)
            - consu + no expense_policy : manual (set manually on SOL)
            - service (+ service_type='manual', the only available option) : manual
            This is true when only sale is installed: sale_stock redefines the behavior for the 'consu' type,
            and sale_timesheet implements the behavior of 'service' + service_type=timesheet.
"""
for line in self:
if line.is_expense:
line.qty_delivered_method = 'analytic'
else: # service and consu
line.qty_delivered_method = 'manual'
@api.multi
@api.depends('qty_delivered_method', 'qty_delivered_manual', 'analytic_line_ids.so_line', 'analytic_line_ids.unit_amount', 'analytic_line_ids.product_uom_id')
    def _compute_qty_delivered(self):
        """ This method computes the delivered quantity of the SO lines: it covers the cases provided by the sale
            module, i.e. expenses/vendor bills (sum of the unit_amount of the AALs), and the manual case.
            This method should be overridden to provide other ways to automatically compute the delivered qty.
            Overrides should take their concerned SO lines, compute and set the `qty_delivered` field, and call
            super with the remaining records.
"""
# compute for analytic lines
lines_by_analytic = self.filtered(lambda sol: sol.qty_delivered_method == 'analytic')
mapping = lines_by_analytic._get_delivered_quantity_by_analytic([('amount', '<=', 0.0)])
for so_line in lines_by_analytic:
so_line.qty_delivered = mapping.get(so_line.id, 0.0)
# compute for manual lines
for line in self:
if line.qty_delivered_method == 'manual':
line.qty_delivered = line.qty_delivered_manual or 0.0
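    # Illustrative sketch (not part of the original file) of the override pattern described
    # in the docstring above: a hypothetical extension handles its own lines, then calls
    # super with the remaining records. The 'stock_move' method value is an assumption.
    #
    #     @api.multi
    #     def _compute_qty_delivered(self):
    #         lines_by_stock = self.filtered(lambda sol: sol.qty_delivered_method == 'stock_move')
    #         for line in lines_by_stock:
    #             line.qty_delivered = ...  # compute from confirmed pickings
    #         super(SaleOrderLine, self - lines_by_stock)._compute_qty_delivered()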
@api.multi
    def _get_delivered_quantity_by_analytic(self, additional_domain):
        """ Compute the delivered quantity of the current SO lines, based on their related
            analytic lines, and return it as a mapping {so_line_id: delivered quantity}.
:param additional_domain: domain to restrict AAL to include in computation (required since timesheet is an AAL with a project ...)
"""
result = {}
# avoid recomputation if no SO lines concerned
if not self:
return result
        # group analytic lines by product uom and so line
domain = expression.AND([[('so_line', 'in', self.ids)], additional_domain])
data = self.env['account.analytic.line'].read_group(
domain,
['so_line', 'unit_amount', 'product_uom_id'], ['product_uom_id', 'so_line'], lazy=False
)
# convert uom and sum all unit_amount of analytic lines to get the delivered qty of SO lines
# browse so lines and product uoms here to make them share the same prefetch
lines_map = {line.id: line for line in self}
product_uom_ids = [item['product_uom_id'][0] for item in data if item['product_uom_id']]
product_uom_map = {uom.id: uom for uom in self.env['uom.uom'].browse(product_uom_ids)}
for item in data:
if not item['product_uom_id']:
continue
so_line_id = item['so_line'][0]
so_line = lines_map[so_line_id]
result.setdefault(so_line_id, 0.0)
uom = product_uom_map.get(item['product_uom_id'][0])
if so_line.product_uom.category_id == uom.category_id:
qty = uom._compute_quantity(item['unit_amount'], so_line.product_uom)
else:
qty = item['unit_amount']
result[so_line_id] += qty
return result
@api.multi
@api.onchange('qty_delivered')
    def _inverse_qty_delivered(self):
        """ When writing on qty_delivered, if the value is meant to be modified manually (`qty_delivered_method` = 'manual' only),
            then we put the value in `qty_delivered_manual`. Otherwise, `qty_delivered_manual` should be False since the
            delivered qty is automatically computed by other mechanisms.
"""
for line in self:
if line.qty_delivered_method == 'manual':
line.qty_delivered_manual = line.qty_delivered
else:
line.qty_delivered_manual = 0.0
@api.multi
def _prepare_invoice_line(self, qty):
"""
Prepare the dict of values to create the new invoice line for a sales order line.
:param qty: float quantity to invoice
"""
self.ensure_one()
res = {}
account = self.product_id.property_account_income_id or self.product_id.categ_id.property_account_income_categ_id
if not account:
raise UserError(_('Please define income account for this product: "%s" (id:%d) - or for its category: "%s".') %
(self.product_id.name, self.product_id.id, self.product_id.categ_id.name))
fpos = self.order_id.fiscal_position_id or self.order_id.partner_id.property_account_position_id
if fpos:
account = fpos.map_account(account)
res = {
'name': self.name,
'sequence': self.sequence,
'origin': self.order_id.name,
'account_id': account.id,
'price_unit': self.price_unit,
'quantity': qty,
'discount': self.discount,
'uom_id': self.product_uom.id,
'product_id': self.product_id.id or False,
'layout_category_id': self.layout_category_id and self.layout_category_id.id or False,
'invoice_line_tax_ids': [(6, 0, self.tax_id.ids)],
'account_analytic_id': self.order_id.analytic_account_id.id,
'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],
}
return res
@api.multi
def invoice_line_create(self, invoice_id, qty):
""" Create an invoice line. The quantity to invoice can be positive (invoice) or negative (refund).
:param invoice_id: integer
:param qty: float quantity to invoice
:returns recordset of account.invoice.line created
"""
invoice_lines = self.env['account.invoice.line']
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if not float_is_zero(qty, precision_digits=precision):
vals = line._prepare_invoice_line(qty=qty)
vals.update({'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})
invoice_lines |= self.env['account.invoice.line'].create(vals)
return invoice_lines
@api.multi
    def _prepare_procurement_values(self, group_id=False):
        """ Prepare specific keys for moves or other components that will be created from a procurement rule
            coming from a sale order line. This method could be overridden in order to add other custom keys that
            could be used in move/PO creation.
"""
return {}
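    # Illustrative sketch (not in the original file): an override adding custom procurement
    # values, as allowed by the docstring above. The keys and the 'route_id' field are
    # hypothetical and would come from the extending module.
    #
    #     @api.multi
    #     def _prepare_procurement_values(self, group_id=False):
    #         values = super(SaleOrderLine, self)._prepare_procurement_values(group_id=group_id)
    #         values.update({
    #             'date_planned': self.order_id.confirmation_date,
    #             'route_ids': self.route_id,
    #         })
    #         return values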
@api.multi
def _get_display_price(self, product):
# TO DO: move me in master/saas-16 on sale.order
if self.order_id.pricelist_id.discount_policy == 'with_discount':
return product.with_context(pricelist=self.order_id.pricelist_id.id).price
final_price, rule_id = self.order_id.pricelist_id.get_product_price_rule(self.product_id, self.product_uom_qty or 1.0, self.order_id.partner_id)
context_partner = dict(self.env.context, partner_id=self.order_id.partner_id.id, date=self.order_id.date_order)
base_price, currency_id = self.with_context(context_partner)._get_real_price_currency(self.product_id, rule_id, self.product_uom_qty, self.product_uom, self.order_id.pricelist_id.id)
if currency_id != self.order_id.pricelist_id.currency_id.id:
currency = self.env['res.currency'].browse(currency_id)
base_price = currency._convert(
base_price, self.order_id.pricelist_id.currency_id,
self.order_id.company_id, self.order_id.date_order or fields.Date.today())
# negative discounts (= surcharge) are included in the display price
return max(base_price, final_price)
@api.multi
@api.onchange('product_id')
def product_id_change(self):
if not self.product_id:
return {'domain': {'product_uom': []}}
vals = {}
domain = {'product_uom': [('category_id', '=', self.product_id.uom_id.category_id.id)]}
if not self.product_uom or (self.product_id.uom_id.id != self.product_uom.id):
vals['product_uom'] = self.product_id.uom_id
vals['product_uom_qty'] = 1.0
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=vals.get('product_uom_qty') or self.product_uom_qty,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id
)
result = {'domain': domain}
title = False
message = False
warning = {}
if product.sale_line_warn != 'no-message':
title = _("Warning for %s") % product.name
message = product.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
result = {'warning': warning}
if product.sale_line_warn == 'block':
self.product_id = False
return result
name = product.name_get()[0][1]
if product.description_sale:
name += '\n' + product.description_sale
vals['name'] = name
self._compute_tax_id()
if self.order_id.pricelist_id and self.order_id.partner_id:
vals['price_unit'] = self.env['account.tax']._fix_tax_included_price_company(self._get_display_price(product), product.taxes_id, self.tax_id, self.company_id)
self.update(vals)
return result
@api.onchange('product_uom', 'product_uom_qty')
def product_uom_change(self):
if not self.product_uom or not self.product_id:
self.price_unit = 0.0
return
if self.order_id.pricelist_id and self.order_id.partner_id:
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=self.product_uom_qty,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id,
fiscal_position=self.env.context.get('fiscal_position')
)
self.price_unit = self.env['account.tax']._fix_tax_included_price_company(self._get_display_price(product), product.taxes_id, self.tax_id, self.company_id)
@api.multi
def name_get(self):
result = []
for so_line in self:
name = '%s - %s' % (so_line.order_id.name, so_line.name.split('\n')[0] or so_line.product_id.name)
if so_line.order_partner_id.ref:
name = '%s (%s)' % (name, so_line.order_partner_id.ref)
result.append((so_line.id, name))
return result
@api.model
def name_search(self, name='', args=None, operator='ilike', limit=100):
if operator in ('ilike', 'like', '=', '=like', '=ilike'):
args = expression.AND([
args or [],
['|', ('order_id.name', operator, name), ('name', operator, name)]
])
return super(SaleOrderLine, self).name_search(name, args, operator, limit)
@api.multi
def unlink(self):
if self.filtered(lambda x: x.state in ('sale', 'done')):
raise UserError(_('You can not remove an order line once the sales order is confirmed.\nYou should rather set the quantity to 0.'))
return super(SaleOrderLine, self).unlink()
def _get_real_price_currency(self, product, rule_id, qty, uom, pricelist_id):
"""Retrieve the price before applying the pricelist
        :param obj product: object of current product record
        :param int rule_id: id of the suitable pricelist rule coming from the pricelist computation
        :param float qty: total quantity of product
        :param obj uom: unit of measure of current order line
        :param integer pricelist_id: pricelist id of sales order"""
PricelistItem = self.env['product.pricelist.item']
field_name = 'lst_price'
currency_id = None
product_currency = None
if rule_id:
pricelist_item = PricelistItem.browse(rule_id)
if pricelist_item.pricelist_id.discount_policy == 'without_discount':
while pricelist_item.base == 'pricelist' and pricelist_item.base_pricelist_id and pricelist_item.base_pricelist_id.discount_policy == 'without_discount':
price, rule_id = pricelist_item.base_pricelist_id.with_context(uom=uom.id).get_product_price_rule(product, qty, self.order_id.partner_id)
pricelist_item = PricelistItem.browse(rule_id)
if pricelist_item.base == 'standard_price':
field_name = 'standard_price'
if pricelist_item.base == 'pricelist' and pricelist_item.base_pricelist_id:
field_name = 'price'
product = product.with_context(pricelist=pricelist_item.base_pricelist_id.id)
product_currency = pricelist_item.base_pricelist_id.currency_id
currency_id = pricelist_item.pricelist_id.currency_id
        product_currency = product_currency or (product.company_id and product.company_id.currency_id) or self.env.user.company_id.currency_id
if not currency_id:
currency_id = product_currency
cur_factor = 1.0
else:
if currency_id.id == product_currency.id:
cur_factor = 1.0
else:
cur_factor = currency_id._get_conversion_rate(product_currency, currency_id, self.company_id, self.order_id.date_order)
product_uom = self.env.context.get('uom') or product.uom_id.id
if uom and uom.id != product_uom:
# the unit price is in a different uom
uom_factor = uom._compute_price(1.0, product.uom_id)
else:
uom_factor = 1.0
return product[field_name] * uom_factor * cur_factor, currency_id
def _get_protected_fields(self):
return [
'product_id', 'name', 'price_unit', 'product_uom', 'product_uom_qty',
'tax_id', 'analytic_tag_ids'
]
@api.onchange('product_id', 'price_unit', 'product_uom', 'product_uom_qty', 'tax_id')
def _onchange_discount(self):
self.discount = 0.0
if not (self.product_id and self.product_uom and
self.order_id.partner_id and self.order_id.pricelist_id and
self.order_id.pricelist_id.discount_policy == 'without_discount' and
self.env.user.has_group('sale.group_discount_per_so_line')):
return
context_partner = dict(self.env.context, partner_id=self.order_id.partner_id.id, date=self.order_id.date_order)
pricelist_context = dict(context_partner, uom=self.product_uom.id)
price, rule_id = self.order_id.pricelist_id.with_context(pricelist_context).get_product_price_rule(self.product_id, self.product_uom_qty or 1.0, self.order_id.partner_id)
new_list_price, currency_id = self.with_context(context_partner)._get_real_price_currency(self.product_id, rule_id, self.product_uom_qty, self.product_uom, self.order_id.pricelist_id.id)
if new_list_price != 0:
if self.order_id.pricelist_id.currency_id.id != currency_id:
# we need new_list_price in the same currency as price, which is in the SO's pricelist's currency
currency = self.env['res.currency'].browse(currency_id)
new_list_price = currency._convert(
new_list_price, self.order_id.pricelist_id.currency_id,
self.order_id.company_id, self.order_id.date_order or fields.Date.today())
discount = (new_list_price - price) / new_list_price * 100
if discount > 0:
self.discount = discount
| maxive/erp | addons/sale/models/sale.py | Python | agpl-3.0 | 64,688 |
# coding: utf-8
# © 2015 Valentin CHEMIERE @ Akretion
# © @author Mourad EL HADJ MIMOUNE <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
import os
import threading
import odoo
from odoo import models, api
# from odoo.addons.auth_signup.controllers.main import AuthSignupHome
from odoo.addons.tko_document_ocr.models.ir_attachment import \
_PDF_OCR_DOCUMENTS_THREADS
_logger = logging.getLogger(__name__)
class Task(models.Model):
_inherit = 'external.file.task'
def _import_file_threaded(self, attach_obj, conn, file_name):
md5_datas = ''
with api.Environment.manage():
with odoo.registry(
self.env.cr.dbname).cursor() as new_cr:
new_env = api.Environment(new_cr, self.env.uid,
self.env.context)
try:
full_path = os.path.join(self.filepath, file_name)
file_data = conn.open(full_path, 'rb')
datas = file_data.read()
if self.md5_check:
md5_file = conn.open(full_path + '.md5', 'rb')
md5_datas = md5_file.read().rstrip('\r\n')
attach_vals = self._prepare_attachment_vals(
datas, file_name, md5_datas)
attachment = attach_obj.with_env(new_env).create(
attach_vals)
new_full_path = False
if self.after_import == 'rename':
new_name = self._template_render(
self.new_name, attachment)
new_full_path = os.path.join(
self.filepath, new_name)
elif self.after_import == 'move':
new_full_path = os.path.join(
self.move_path, file_name)
elif self.after_import == 'move_rename':
new_name = self._template_render(
self.new_name, attachment)
new_full_path = os.path.join(
self.move_path, new_name)
if new_full_path:
conn.rename(full_path, new_full_path)
if self.md5_check:
conn.rename(
full_path + '.md5',
                                new_full_path + '.md5')
if self.after_import == 'delete':
conn.remove(full_path)
if self.md5_check:
conn.remove(full_path + '.md5')
except Exception, e:
new_env.cr.rollback()
_logger.error('Error importing file %s '
'from %s: %s',
file_name,
self.filepath,
e)
# move on to process other files
else:
new_env.cr.commit()
@api.multi
def run_import(self):
self.ensure_one()
protocols = self.env['external.file.location']._get_classes()
cls = protocols.get(self.location_id.protocol)[1]
attach_obj = self.env['ir.attachment.metadata']
try:
connection = cls.connect(self.location_id)
with connection as conn:
try:
files = conn.listdir(path=self.filepath,
wildcard=self.filename or '',
files_only=True)
for file_name in files:
t = threading.Thread(target=self._import_file_threaded,
name=u'import_file' + file_name,
args=(attach_obj,
conn,
file_name))
t.start()
for t in _PDF_OCR_DOCUMENTS_THREADS:
t.join()
except:
_logger.error('Directory %s does not exist', self.filepath)
return
except:
_logger.error('Root directory %s does not exist', self.filepath)
return | thinkopensolutions/tkobr-addons | tko_document_ocr_external_file_location_threaded/models/task.py | Python | agpl-3.0 | 4,504 |
# Based on https://bitbucket.org/jokull/django-email-login/
import re
from uuid import uuid4
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from django.contrib.sites.models import RequestSite, Site
from registration import signals
from registration.models import RegistrationProfile
from invitation.backends import InvitationBackend
from forms import RegistrationForm
class RegistrationBackend(InvitationBackend):
"""
Does not require the user to pick a username. Sets the username to a random
string behind the scenes.
"""
def register(self, request, **kwargs):
email, password = kwargs['email'], kwargs['password1']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
new_user = RegistrationProfile.objects.create_inactive_user(
uuid4().get_hex()[:10], email, password, site)
signals.user_registered.send(
sender=self.__class__, user=new_user, request=request)
return new_user
def get_form_class(self, request):
"""
Return the default form class used for user registration.
"""
return RegistrationForm
email_re = re.compile(
# dot-atom
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
# quoted-string
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
r'\\[\001-\011\013\014\016-\177])*"'
# domain
r')@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}$', re.IGNORECASE)
class AuthBackend(ModelBackend):
"""Authenticate using email only"""
def authenticate(self, username=None, password=None, email=None):
if email is None:
email = username
if email_re.search(email):
user = User.objects.filter(email__iexact=email)
if user.count() > 0:
user = user[0]
if user.check_password(password):
return user
return None
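# Illustrative note (not part of the original module): to enable email-only authentication,
# a project would typically list this backend in its settings. The dotted path below is an
# assumption derived from this file's location.
#
#     AUTHENTICATION_BACKENDS = (
#         'comics.accounts.backends.AuthBackend',
#         'django.contrib.auth.backends.ModelBackend',
#     )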
| datagutten/comics | comics/accounts/backends.py | Python | agpl-3.0 | 2,013 |
# solution to Tokens 5, 6
think(0)
while object_here():
take()
move()
while carries_object():
put()
while not at_goal():
move()
| code4futuredotorg/reeborg_tw | test/src/tokens56_en.py | Python | agpl-3.0 | 144 |
# Copyright 2020 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
"name": "Product Template Attribute Value Menu",
"version": "12.0.1.1.0",
"license": "AGPL-3",
"author": "AvanzOSC",
"website": "http://www.avanzosc.es",
"category": "Sales",
"depends": [
"stock",
"sale_management"
],
"data": [
"security/product_template_attribute_value_menu_rules.xml",
"views/product_template_attribute_value_view.xml",
],
"installable": True,
}
| oihane/odoo-addons | product_template_attribute_value_menu/__manifest__.py | Python | agpl-3.0 | 558 |
# -*- coding: utf-8 -*-
# See README file for full copyright and licensing details.
from . import sale
from . import delivery
| rgbconsulting/rgb-sale | delivery_sale_cost/models/__init__.py | Python | agpl-3.0 | 127 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Christophe Lampin
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword
from weboob.capabilities.bill import Detail
from decimal import Decimal
from .pages import LoginPage, HomePage, AccountPage, HistoryPage, BillsPage
__all__ = ['AmeliProBrowser']
class AmeliProBrowser(BaseBrowser):
PROTOCOL = 'https'
DOMAIN = 'espacepro.ameli.fr'
ENCODING = None
PAGES = {'.*_pageLabel=vp_login_page.*': LoginPage,
'.*_pageLabel=vp_accueil.*': HomePage,
'.*_pageLabel=vp_coordonnees_infos_perso_page.*': AccountPage,
'.*_pageLabel=vp_recherche_par_date_paiements_page.*': HistoryPage,
'.*_pageLabel=vp_releves_mensuels_page.*': BillsPage,
}
loginp = '/PortailPS/appmanager/portailps/professionnelsante?_nfpb=true&_pageLabel=vp_login_page'
homep = '/PortailPS/appmanager/portailps/professionnelsante?_nfpb=true&_pageLabel=vp_accueil_book'
accountp = '/PortailPS/appmanager/portailps/professionnelsante?_nfpb=true&_pageLabel=vp_coordonnees_infos_perso_page'
billsp = '/PortailPS/appmanager/portailps/professionnelsante?_nfpb=true&_pageLabel=vp_releves_mensuels_page'
searchp = '/PortailPS/appmanager/portailps/professionnelsante?_nfpb=true&_pageLabel=vp_recherche_par_date_paiements_page'
historyp = '/PortailPS/appmanager/portailps/professionnelsante?_nfpb=true&_windowLabel=vp_recherche_paiement_tiers_payant_portlet_1&vp_recherche_paiement_tiers_payant_portlet_1_actionOverride=%2Fportlets%2Fpaiements%2Frecherche&_pageLabel=vp_recherche_par_date_paiements_page'
def home(self):
self.location(self.homep)
def is_logged(self):
if self.is_on_page(LoginPage):
return False
return True
def login(self):
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
if not self.is_on_page(LoginPage):
self.location(self.loginp)
self.page.login(self.username, self.password)
if self.is_on_page(LoginPage):
raise BrowserIncorrectPassword()
def get_subscription_list(self):
if not self.is_on_page(AccountPage):
self.location(self.accountp)
return self.page.get_subscription_list()
def get_subscription(self, id):
assert isinstance(id, basestring)
return self.get_subscription_list()
def iter_history(self, subscription):
if not self.is_on_page(HistoryPage):
self.location(self.searchp)
date_deb = self.page.document.xpath('//input[@name="vp_recherche_paiement_tiers_payant_portlet_1dateDebutRecherche"]')[0].value
date_fin = self.page.document.xpath('//input[@name="vp_recherche_paiement_tiers_payant_portlet_1dateFinRecherche"]')[0].value
data = {'vp_recherche_paiement_tiers_payant_portlet_1dateDebutRecherche': date_deb,
'vp_recherche_paiement_tiers_payant_portlet_1dateFinRecherche': date_fin,
'vp_recherche_paiement_tiers_payant_portlet_1codeOrganisme': 'null',
'vp_recherche_paiement_tiers_payant_portlet_1actionEvt': 'rechercheParDate',
'vp_recherche_paiement_tiers_payant_portlet_1codeRegime': '01',
}
self.location(self.historyp, urllib.urlencode(data))
return self.page.iter_history()
def get_details(self, sub):
det = Detail()
det.id = sub.id
det.label = sub.label
det.infos = ''
det.price = Decimal('0.0')
return det
def iter_bills(self):
if not self.is_on_page(BillsPage):
self.location(self.billsp)
return self.page.iter_bills()
def get_bill(self, id):
assert isinstance(id, basestring)
        for b in self.iter_bills():
if id == b.id:
return b
return None
| blckshrk/Weboob | modules/amelipro/browser.py | Python | agpl-3.0 | 4,595 |
"""
BigchainDB TYMLEZ Consensus Plugin
"""
from setuptools import setup
tests_require = [
'pytest',
'pep8',
'pylint',
'pytest',
]
dev_require = [
'ipdb',
'ipython',
]
docs_require = [
]
setup(
name='BigchainDB TYMLEZ Consensus Plugin',
version='0.0.2',
description='BigchainDB TYMLEZ Consensus Plugin',
long_description=__doc__,
url='https://github.com/tymlez/consensus',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha'
],
packages=[
'consensus_template'
],
entry_points={
'bigchaindb.consensus': [
'tymlezconsensus=consensus_template.consensus:ConsensusRulesTemplate'
]
},
install_requires=[
'bigchaindb>=0.10.0.dev'
],
setup_requires=['pytest-runner'],
tests_require=tests_require,
extras_require={
'test': tests_require,
'dev': dev_require + tests_require + docs_require,
'docs': docs_require,
},
)
| tymlez/consensus | setup.py | Python | agpl-3.0 | 1,000 |
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'product_extra_photo',
'version': '0.1',
'category': 'Customization',
'author': 'Micronaet s.r.l.',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': ['base','product'],
'init_xml': [],
'demo_xml': [],
'update_xml': [
'security/ir.model.access.csv',
#'product.xml',
#'wizard/wizard_import_view.xml',
],
'active': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-migration | __UNPORTED__/product_extra_photo/__openerp__.py | Python | agpl-3.0 | 1,570 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#.apidoc title: Object Relational Mapping
#.apidoc module-mods: member-order: bysource
"""
Object relational mapping to database (postgresql) module
* Hierarchical structure
* Constraints consistency, validations
* Object meta Data depends on its status
* Optimised processing by complex query (multiple actions at once)
    * Default field values
    * Permissions optimisation
    * Persistent object: DB postgresql
    * Data conversions
    * Multi-level caching system
    * 2 different inheritance mechanisms
    * Fields:
        - classical (varchar, integer, boolean, ...)
- relations (one2many, many2one, many2many)
- functions
"""
import babel.dates
import calendar
import collections
import copy
import datetime
import itertools
import logging
import operator
import pickle
import re
import simplejson
import time
import traceback
import types
import psycopg2
from lxml import etree
import fields
import openerp
import openerp.netsvc as netsvc
import openerp.tools as tools
from openerp.tools.config import config
from openerp.tools.misc import CountingStream
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from query import Query
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from openerp.tools import SKIPPED_ELEMENT_TYPES
regex_order = re.compile('^(([a-z0-9_]+|"[a-z0-9_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
for state, modifs in (field.get("states",{})).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['tree_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``tree_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``tree_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, basestring):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', '{}')
test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
test_modifiers('<field name="a" required="1"/>', '{"required": true}')
test_modifiers('<field name="a" invisible="0"/>', '{}')
test_modifiers('<field name="a" readonly="0"/>', '{}')
test_modifiers('<field name="a" required="0"/>', '{}')
test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, '{}')
test_modifiers({"invisible": True}, '{"invisible": true}')
test_modifiers({"invisible": False}, '{}')
def check_object_name(name):
""" Check if the given name is a valid openerp object name.
The _name attribute in osv and osv_memory object is subject to
    some restrictions. This function returns True or False depending on
    whether the given name is allowed.
TODO: this is an approximation. The goal in this approximation
is to disallow uppercase characters (in some places, we quote
table/column names and in other not, which leads to this kind
of errors:
psycopg2.ProgrammingError: relation "xxx" does not exist).
The same restriction should apply to both osv and osv_memory
objects for consistency.
"""
if regex_object_name.match(name) is None:
return False
return True
def raise_on_invalid_object_name(name):
if not check_object_name(name):
msg = "The _name attribute %s is not valid." % name
_logger.error(msg)
raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
'RESTRICT': 'r',
'NO ACTION': 'a',
'CASCADE': 'c',
'SET NULL': 'n',
'SET DEFAULT': 'd',
}
def intersect(la, lb):
return filter(lambda x: x in lb, la)
def fix_import_export_id_paths(fieldname):
"""
Fixes the id fields in import and exports, and splits field paths
on '/'.
:param str fieldname: name of the field to import/export
:return: split field name
:rtype: list of str
"""
fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
return fixed_external_id.split('/')
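# For reference (added comment, not in the original source), a few examples of the
# transformation performed above:
#     fix_import_export_id_paths('parent_id.id') -> ['parent_id', '.id']
#     fix_import_export_id_paths('parent_id:id') -> ['parent_id', 'id']
#     fix_import_export_id_paths('line_ids/product_id') -> ['line_ids', 'product_id']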
class except_orm(Exception):
def __init__(self, name, value):
self.name = name
self.value = value
self.args = (name, value)
class BrowseRecordError(Exception):
pass
class browse_null(object):
""" Readonly python database object browser
"""
def __init__(self):
self.id = False
def __getitem__(self, name):
return None
def __getattr__(self, name):
return None # XXX: return self ?
def __int__(self):
return False
def __str__(self):
return ''
def __nonzero__(self):
return False
def __unicode__(self):
return u''
#
# TODO: execute an object method on browse_record_list
#
class browse_record_list(list):
""" Collection of browse objects
Such an instance will be returned when doing a ``browse([ids..])``
and will be iterable, yielding browse() objects
"""
def __init__(self, lst, context=None):
if not context:
context = {}
super(browse_record_list, self).__init__(lst)
self.context = context
class browse_record(object):
""" An object that behaves like a row of an object's table.
It has attributes after the columns of the corresponding object.
Examples::
uobj = pool.get('res.users')
user_rec = uobj.browse(cr, uid, 104)
name = user_rec.name
"""
def __init__(self, cr, uid, id, table, cache, context=None,
list_class=browse_record_list, fields_process=None):
"""
:param table: the browsed object (inherited from orm)
:param dict cache: a dictionary of model->field->data to be shared
across browse objects, thus reducing the SQL
read()s. It can speed up things a lot, but also be
disastrous if not discarded after write()/unlink()
operations
:param dict context: dictionary with an optional context
"""
if fields_process is None:
fields_process = {}
if context is None:
context = {}
self._list_class = list_class
self._cr = cr
self._uid = uid
self._id = id
self._table = table # deprecated, use _model!
self._model = table
self._table_name = self._table._name
self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
self._context = context
self._fields_process = fields_process
cache.setdefault(table._name, {})
self._data = cache[table._name]
# if not (id and isinstance(id, (int, long,))):
# raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
# if not table.exists(cr, uid, id, context):
# raise BrowseRecordError(_('Object %s does not exists') % (self,))
if id not in self._data:
self._data[id] = {'id': id}
self._cache = cache
def __getitem__(self, name):
if name == 'id':
return self._id
if name not in self._data[self._id]:
# build the list of fields we will fetch
# fetch the definition of the field which was asked for
if name in self._table._columns:
col = self._table._columns[name]
elif name in self._table._inherit_fields:
col = self._table._inherit_fields[name][2]
elif hasattr(self._table, str(name)):
attr = getattr(self._table, name)
if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
def function_proxy(*args, **kwargs):
if 'context' not in kwargs and self._context:
kwargs.update(context=self._context)
return attr(self._cr, self._uid, [self._id], *args, **kwargs)
return function_proxy
else:
return attr
else:
error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
self.__logger.warning(error_msg)
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug(''.join(traceback.format_stack()))
raise KeyError(error_msg)
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
if col._prefetch and not col.groups:
# gen the list of "local" (ie not inherited) fields which are classic or many2one
field_filter = lambda x: x[1]._classic_write and x[1]._prefetch and not x[1].groups
fields_to_fetch = filter(field_filter, self._table._columns.items())
# gen the list of inherited fields
inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
# complete the field list with the inherited fields which are classic or many2one
fields_to_fetch += filter(field_filter, inherits)
# otherwise we fetch only that field
else:
fields_to_fetch = [(name, col)]
ids = filter(lambda id: name not in self._data[id], self._data.keys())
# read the results
field_names = map(lambda x: x[0], fields_to_fetch)
field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
# TODO: improve this, very slow for reports
if self._fields_process:
lang = self._context.get('lang', 'en_US') or 'en_US'
lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
if not lang_obj_ids:
raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])
for field_name, field_column in fields_to_fetch:
if field_column._type in self._fields_process:
for result_line in field_values:
result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
if result_line[field_name]:
result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)
if not field_values:
# Where did those ids come from? Perhaps old entries in ir_model_data?
_logger.warning("No field_values found for ids %s in %s", ids, self)
raise KeyError('Field %s not found in %s'%(name, self))
# create browse records for 'remote' objects
for result_line in field_values:
new_data = {}
for field_name, field_column in fields_to_fetch:
if field_column._type == 'many2one':
if result_line[field_name]:
obj = self._table.pool.get(field_column._obj)
if isinstance(result_line[field_name], (list, tuple)):
value = result_line[field_name][0]
else:
value = result_line[field_name]
if value:
# FIXME: this happens when an _inherits object
# overwrites a field of its parent. Needs
# testing to be sure we get the right
# object and not the parent one.
if not isinstance(value, browse_record):
if obj is None:
# In some cases the target model is not available yet, so we must ignore it,
# which is safe in most cases: this value will just be loaded later when needed.
# This situation can be caused by custom fields that connect objects with m2o without
# respecting module dependencies, causing relationships to be resolved too soon, when
# the target is not loaded yet.
continue
new_data[field_name] = browse_record(self._cr,
self._uid, value, obj, self._cache,
context=self._context,
list_class=self._list_class,
fields_process=self._fields_process)
else:
new_data[field_name] = value
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = browse_null()
elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
new_data[field_name] = self._list_class([browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj), self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process) for id in result_line[field_name]], self._context)
elif field_column._type == 'reference':
if result_line[field_name]:
if isinstance(result_line[field_name], browse_record):
new_data[field_name] = result_line[field_name]
else:
ref_obj, ref_id = result_line[field_name].split(',')
ref_id = long(ref_id)
if ref_id:
obj = self._table.pool.get(ref_obj)
new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = result_line[field_name]
self._data[result_line['id']].update(new_data)
if not name in self._data[self._id]:
# How did this happen? Could be a missing model due to custom fields used too soon, see above.
self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
return self._data[self._id][name]
def __getattr__(self, name):
try:
return self[name]
except KeyError, e:
raise AttributeError(e)
def __contains__(self, name):
return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)
def __iter__(self):
raise NotImplementedError("Iteration is not allowed on %s" % self)
def __hasattr__(self, name):
return name in self
def __int__(self):
return self._id
def __str__(self):
return "browse_record(%s, %d)" % (self._table_name, self._id)
def __eq__(self, other):
if not isinstance(other, browse_record):
return False
return (self._table_name, self._id) == (other._table_name, other._id)
def __ne__(self, other):
if not isinstance(other, browse_record):
return True
return (self._table_name, self._id) != (other._table_name, other._id)
# we need to define __unicode__ even though we've already defined __str__
# because we have overridden __getattr__
def __unicode__(self):
return unicode(str(self))
def __hash__(self):
return hash((self._table_name, self._id))
__repr__ = __str__
def refresh(self):
"""Force refreshing this browse_record's data and all the data of the
records that belong to the same cache, by emptying the cache completely,
preserving only the record identifiers (for prefetching optimizations).
"""
for model, model_cache in self._cache.iteritems():
# only preserve the ids of the records that were in the cache
cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
self._cache[model].clear()
self._cache[model].update(cached_ids)
def pg_varchar(size=0):
""" Returns the VARCHAR declaration for the provided size:
* If no size is provided (or the size is zero or negative), return an
'infinite' VARCHAR
* Otherwise return a VARCHAR(n)
:param int size: varchar size, optional
:rtype: str
"""
if size:
if not isinstance(size, int):
raise TypeError("VARCHAR parameter should be an int, got %s"
% type(size))
if size > 0:
return 'VARCHAR(%d)' % size
return 'VARCHAR'
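# Quick sanity sketch (not in the original source): a missing or non-positive size
# falls back to an unbounded VARCHAR, otherwise the size is embedded in the type.
def _example_pg_varchar():
    assert pg_varchar() == 'VARCHAR'
    assert pg_varchar(0) == 'VARCHAR'
    assert pg_varchar(16) == 'VARCHAR(16)'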
FIELDS_TO_PGTYPES = {
fields.boolean: 'bool',
fields.integer: 'int4',
fields.text: 'text',
fields.html: 'text',
fields.date: 'date',
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
"""
:param fields._column f: field to get a Postgres type for
:param type type_override: use the provided type for dispatching instead of the field's own type
:returns: (postgres_identification_type, postgres_type_specification)
:rtype: (str, str)
"""
field_type = type_override or type(f)
if field_type in FIELDS_TO_PGTYPES:
pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
elif issubclass(field_type, fields.float):
if f.digits:
pg_type = ('numeric', 'NUMERIC')
else:
pg_type = ('float8', 'DOUBLE PRECISION')
elif issubclass(field_type, (fields.char, fields.reference)):
pg_type = ('varchar', pg_varchar(f.size))
elif issubclass(field_type, fields.selection):
if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
or getattr(f, 'size', None) == -1:
pg_type = ('int4', 'INTEGER')
else:
pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
elif issubclass(field_type, fields.function):
if f._type == 'selection':
pg_type = ('varchar', pg_varchar())
else:
pg_type = get_pg_type(f, getattr(fields, f._type))
else:
_logger.warning('%s type not supported!', field_type)
pg_type = None
return pg_type
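# Illustrative mappings (assumption, not from the original source, assuming the standard
# fields.char/fields.boolean/fields.float constructors from openerp.osv.fields):
#   get_pg_type(fields.boolean('Active'))               -> ('bool', 'bool')
#   get_pg_type(fields.char('Name', size=64))           -> ('varchar', 'VARCHAR(64)')
#   get_pg_type(fields.float('Amount', digits=(16, 2))) -> ('numeric', 'NUMERIC')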
class MetaModel(type):
""" Metaclass for the Model.
This class is used as the metaclass for the Model class to discover
the models defined in a module (i.e. without instantiating them).
If the automatic discovery is not needed, it is possible to set the
model's _register attribute to False.
"""
module_to_models = {}
def __init__(self, name, bases, attrs):
if not self._register:
self._register = True
super(MetaModel, self).__init__(name, bases, attrs)
return
# The (OpenERP) module name can be in the `openerp.addons` namespace
# or not. For instance module `sale` can be imported as
# `openerp.addons.sale` (the good way) or `sale` (for backward
# compatibility).
module_parts = self.__module__.split('.')
if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
module_parts[1] == 'addons':
module_name = self.__module__.split('.')[2]
else:
module_name = self.__module__.split('.')[0]
if not hasattr(self, '_module'):
self._module = module_name
# Remember which models to instantiate for this module.
if not self._custom:
self.module_to_models.setdefault(self._module, []).append(self)
# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
'create_date': 'TIMESTAMP',
'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
'write_date': 'TIMESTAMP'
}
# special columns automatically created by the ORM
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS.keys()
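# For reference (not in the original source): with the columns above, MAGIC_COLUMNS
# evaluates to ['id', 'create_uid', 'create_date', 'write_uid', 'write_date'], the order
# of the last four depending on dict iteration order.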
class BaseModel(object):
""" Base class for OpenERP models.
OpenERP models are created by inheriting from this class' subclasses:
* Model: for regular database-persisted models
* TransientModel: for temporary data, stored in the database but automatically
vacuumed every so often
* AbstractModel: for abstract super classes meant to be shared by multiple
_inheriting classes (usually Models or TransientModels)
The system will later instantiate the class once per database (on
which the class' module is installed).
To create a class that should not be instantiated, the _register class attribute
may be set to False.
"""
__metaclass__ = MetaModel
_auto = True # create database backend
_register = False # Set to false if the model shouldn't be automatically discovered.
_name = None
_columns = {}
_constraints = []
_custom = False
_defaults = {}
_rec_name = None
_parent_name = 'parent_id'
_parent_store = False
_parent_order = False
_date_name = 'date'
_order = 'id'
_sequence = None
_description = None
_needaction = False
# dict of {field:method}, with method returning the (name_get of records, {id: fold})
# to include in the _read_group, if grouped on this field
_group_by_full = {}
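# Sketch of a typical entry (assumption, common OpenERP 7 idiom, not from this file):
#   _group_by_full = {'stage_id': _read_group_stage_ids}
# where _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None,
# access_rights_uid=None, context=None) returns a pair
# ([(id, name), ...], {id: folded_flag}) used to populate empty groups (e.g. kanban columns).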
# Transience
_transient = False # True in a TransientModel
# structure:
# { 'parent_model': 'm2o_field', ... }
_inherits = {}
# Mapping from inherits'd field name to a 4-tuple (m, r, f, n) where m is the
# model from which it is inherits'd, r is the (local) field towards m, f
# is the _column object itself, and n is the original (i.e. top-most)
# parent model.
# Example:
#  { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
#                    field_column_obj, original_parent_model), ... }
_inherit_fields = {}
# Mapping field name/column_info object
# This is similar to _inherit_fields but:
# 1. includes self fields,
# 2. uses column_info instead of a 4-tuple.
_all_columns = {}
_table = None
_invalids = set()
_log_create = False
_sql_constraints = []
_protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
CONCURRENCY_CHECK_FIELD = '__last_update'
def log(self, cr, uid, id, message, secondary=False, context=None):
return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
def view_init(self, cr, uid, fields_list, context=None):
"""Override this method to do specific things when a view on the object is opened."""
pass
def _field_create(self, cr, context=None):
""" Create entries in ir_model_fields for all the model's fields.
If necessary, also create an entry in ir_model, and if called from the
modules loading scheme (by receiving 'module' in the context), also
create entries in ir_model_data (for the model and the fields).
- create an entry in ir_model (if there is not already one),
- create an entry in ir_model_data (if there is not already one, and if
'module' is in the context),
- update ir_model_fields with the fields found in _columns
(TODO there is some redundancy as _columns is updated from
ir_model_fields in __init__).
"""
if context is None:
context = {}
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
if not cr.rowcount:
cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
model_id = cr.fetchone()[0]
cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
else:
model_id = cr.fetchone()[0]
if 'module' in context:
name_id = 'model_'+self._name.replace('.', '_')
cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
if not cr.rowcount:
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name_id, context['module'], 'ir.model', model_id)
)
cr.commit()
cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
cols = {}
for rec in cr.dictfetchall():
cols[rec['name']] = rec
ir_model_fields_obj = self.pool.get('ir.model.fields')
# sparse field should be created at the end, as it depends on its serialized field already existing
model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
for (k, f) in model_fields:
vals = {
'model_id': model_id,
'model': self._name,
'name': k,
'field_description': f.string,
'ttype': f._type,
'relation': f._obj or '',
'view_load': (f.view_load and 1) or 0,
'select_level': tools.ustr(f.select or 0),
'readonly': (f.readonly and 1) or 0,
'required': (f.required and 1) or 0,
'selectable': (f.selectable and 1) or 0,
'translate': (f.translate and 1) or 0,
'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
'serialization_field_id': None,
}
if getattr(f, 'serialization_field', None):
# resolve link to serialization_field if specified by name
serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
if not serialization_field_id:
raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
vals['serialization_field_id'] = serialization_field_id[0]
# When it's a custom field, it does not contain f.select
if context.get('field_state', 'base') == 'manual':
if context.get('field_name', '') == k:
vals['select_level'] = context.get('select', '0')
# setting the value so the problem does NOT occur next time
elif k in cols:
vals['select_level'] = cols[k]['select_level']
if k not in cols:
cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
id = cr.fetchone()[0]
vals['id'] = id
cr.execute("""INSERT INTO ir_model_fields (
id, model_id, model, name, field_description, ttype,
relation,view_load,state,select_level,relation_field, translate, serialization_field_id
) VALUES (
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']), 'base',
vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
))
if 'module' in context:
name1 = 'field_' + self._table + '_' + k
cr.execute("select name from ir_model_data where name=%s", (name1,))
if cr.fetchone():
name1 = name1 + "_" + str(id)
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name1, context['module'], 'ir.model.fields', id)
)
else:
for key, val in vals.items():
if cols[k][key] != vals[key]:
cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
cr.commit()
cr.execute("""UPDATE ir_model_fields SET
model_id=%s, field_description=%s, ttype=%s, relation=%s,
view_load=%s, select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
WHERE
model=%s AND name=%s""", (
vals['model_id'], vals['field_description'], vals['ttype'],
vals['relation'], bool(vals['view_load']),
vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
))
break
cr.commit()
#
# Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
@classmethod
def create_instance(cls, pool, cr):
""" Instanciate a given model.
This class method instanciates the class of some model (i.e. a class
deriving from osv or osv_memory). The class might be the class passed
in argument or, if it inherits from another class, a class constructed
by combining the two classes.
The ``attributes`` argument specifies which parent class attributes
have to be combined.
TODO: the creation of the combined class is repeated at each call of
this method. This is probably unnecessary.
"""
attributes = ['_columns', '_defaults', '_inherits', '_constraints',
'_sql_constraints']
parent_names = getattr(cls, '_inherit', None)
if parent_names:
if isinstance(parent_names, (str, unicode)):
name = cls._name or parent_names
parent_names = [parent_names]
else:
name = cls._name
if not name:
raise TypeError('_name is mandatory in case of multiple inheritance')
for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
parent_model = pool.get(parent_name)
if not parent_model:
raise TypeError('The model "%s" specifies an unexisting parent class "%s"\n'
'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
if not getattr(cls, '_original_module', None) and name == parent_model._name:
cls._original_module = parent_model._original_module
parent_class = parent_model.__class__
nattr = {}
for s in attributes:
new = copy.copy(getattr(parent_model, s, {}))
if s == '_columns':
# Don't _inherit custom fields.
for c in new.keys():
if new[c].manual:
del new[c]
# Duplicate float fields because they have a .digits
# cache (which must be per-registry, not server-wide).
for c in new.keys():
if new[c]._type == 'float':
new[c] = copy.copy(new[c])
if hasattr(new, 'update'):
new.update(cls.__dict__.get(s, {}))
elif s=='_constraints':
for c in cls.__dict__.get(s, []):
exist = False
for c2 in range(len(new)):
# For _constraints, we should check fields and methods as well
if new[c2][2]==c[2] and (new[c2][0] == c[0] \
or getattr(new[c2][0],'__name__', True) == \
getattr(c[0],'__name__', False)):
# If new class defines a constraint with
# same function name, we let it override
# the old one.
new[c2] = c
exist = True
break
if not exist:
new.append(c)
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
# Keep links to non-inherited constraints, e.g. useful when exporting translations
nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])
cls = type(name, (cls, parent_class), dict(nattr, _register=False))
else:
cls._local_constraints = getattr(cls, '_constraints', [])
cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])
if not getattr(cls, '_original_module', None):
cls._original_module = cls._module
obj = object.__new__(cls)
obj.__init__(pool, cr)
return obj
def __new__(cls):
"""Register this model.
This doesn't create an instance but simply registers the model
as being part of the module where it is defined.
"""
# Set the module name (e.g. base, sale, accounting, ...) on the class.
module = cls.__module__.split('.')[0]
if not hasattr(cls, '_module'):
cls._module = module
# Record this class in the list of models to instantiate for this module,
# managed by the metaclass.
module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
if cls not in module_model_list:
if not cls._custom:
module_model_list.append(cls)
# Since we don't return an instance here, the __init__
# method won't be called.
return None
def __init__(self, pool, cr):
""" Initialize a model and make it part of the given registry.
- copy the stored fields' functions in the osv_pool,
- update the _columns with the fields found in ir_model_fields,
- ensure there is a many2one for each _inherits'd parent,
- update the children's _columns,
- give a chance to each field to initialize itself.
"""
pool.add(self._name, self)
self.pool = pool
if not self._name and not hasattr(self, '_inherit'):
name = type(self).__name__.split('.')[0]
msg = "The class %s has to have a _name attribute" % name
_logger.error(msg)
raise except_orm('ValueError', msg)
if not self._description:
self._description = self._name
if not self._table:
self._table = self._name.replace('.', '_')
if not hasattr(self, '_log_access'):
# If _log_access is not specified, it is the same value as _auto.
self._log_access = getattr(self, "_auto", True)
self._columns = self._columns.copy()
for store_field in self._columns:
f = self._columns[store_field]
if hasattr(f, 'digits_change'):
f.digits_change(cr)
def not_this_field(stored_func):
x, y, z, e, f, l = stored_func
return x != self._name or y != store_field
self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
if not isinstance(f, fields.function):
continue
if not f.store:
continue
sm = f.store
if sm is True:
sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, 10, None)}
for object, aa in sm.items():
if len(aa) == 4:
(fnct, fields2, order, length) = aa
elif len(aa) == 3:
(fnct, fields2, order) = aa
length = None
else:
raise except_orm('Error',
('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
self.pool._store_function.setdefault(object, [])
t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
if not t in self.pool._store_function[object]:
self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
for (key, _, msg) in self._sql_constraints:
self.pool._sql_error[self._table+'_'+key] = msg
# Load manual fields
# Check whether the query has already been done for all modules, or if we need to
# do it ourselves.
if self.pool.fields_by_model is not None:
manual_fields = self.pool.fields_by_model.get(self._name, [])
else:
cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
manual_fields = cr.dictfetchall()
for field in manual_fields:
if field['name'] in self._columns:
continue
attrs = {
'string': field['field_description'],
'required': bool(field['required']),
'readonly': bool(field['readonly']),
'domain': eval(field['domain']) if field['domain'] else None,
'size': field['size'] or None,
'ondelete': field['on_delete'],
'translate': (field['translate']),
'manual': True,
#'select': int(field['select_level'])
}
if field['serialization_field_id']:
cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
if field['ttype'] in ['many2one', 'one2many', 'many2many']:
attrs.update({'relation': field['relation']})
self._columns[field['name']] = fields.sparse(**attrs)
elif field['ttype'] == 'selection':
self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
elif field['ttype'] == 'reference':
self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
elif field['ttype'] == 'many2one':
self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
elif field['ttype'] == 'one2many':
self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
elif field['ttype'] == 'many2many':
_rel1 = field['relation'].replace('.', '_')
_rel2 = field['model'].replace('.', '_')
_rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
else:
self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
self._inherits_check()
self._inherits_reload()
if not self._sequence:
self._sequence = self._table + '_id_seq'
for k in self._defaults:
assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
for f in self._columns:
self._columns[f].restart()
# Transience
if self.is_transient():
self._transient_check_count = 0
self._transient_max_count = config.get('osv_memory_count_limit')
self._transient_max_hours = config.get('osv_memory_age_limit')
assert self._log_access, "TransientModels must have log_access turned on, "\
"in order to implement their access rights policy"
# Validate rec_name
if self._rec_name is not None:
assert self._rec_name in self._all_columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
else:
self._rec_name = 'name'
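# Sketch of the store= mapping parsed above (assumption, common OpenERP 7 idiom, the
# field and helper names below are hypothetical):
#   'amount_total': fields.function(_amount_all, type='float', string='Total',
#       store={
#           'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
#           'sale.order.line': (_get_order_ids, ['price_unit', 'product_uom_qty'], 10),
#       }),
# Each value is (fnct, watched_fields, priority[, length]); fnct returns the ids of the
# records whose stored value must be recomputed when the watched fields change.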
def __export_row(self, cr, uid, row, fields, context=None):
if context is None:
context = {}
def check_type(field_type):
if field_type == 'float':
return 0.0
elif field_type == 'integer':
return 0
elif field_type == 'boolean':
return 'False'
return ''
def selection_field(in_field):
col_obj = self.pool.get(in_field.keys()[0])
if f[i] in col_obj._columns.keys():
return col_obj._columns[f[i]]
elif f[i] in col_obj._inherits.keys():
return selection_field(col_obj._inherits)
else:
return False
def _get_xml_id(self, cr, uid, r):
model_data = self.pool.get('ir.model.data')
data_ids = model_data.search(cr, uid, [('model', '=', r._table_name), ('res_id', '=', r['id'])])
if len(data_ids):
d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
if d['module']:
r = '%s.%s' % (d['module'], d['name'])
else:
r = d['name']
else:
postfix = 0
while True:
n = self._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
if not model_data.search(cr, uid, [('name', '=', n)]):
break
postfix += 1
model_data.create(cr, SUPERUSER_ID, {
'name': n,
'model': self._name,
'res_id': r['id'],
'module': '__export__',
})
r = '__export__.'+n
return r
lines = []
data = map(lambda x: '', range(len(fields)))
done = []
for fpos in range(len(fields)):
f = fields[fpos]
if f:
r = row
i = 0
while i < len(f):
cols = False
if f[i] == '.id':
r = r['id']
elif f[i] == 'id':
r = _get_xml_id(self, cr, uid, r)
else:
r = r[f[i]]
# To display the external name of a selection field when it's exported
if f[i] in self._columns.keys():
cols = self._columns[f[i]]
elif f[i] in self._inherit_fields.keys():
cols = selection_field(self._inherits)
if cols and cols._type == 'selection':
sel_list = cols.selection
if r and type(sel_list) == type([]):
r = [x[1] for x in sel_list if r==x[0]]
r = r and r[0] or False
if not r:
if f[i] in self._columns:
r = check_type(self._columns[f[i]]._type)
elif f[i] in self._inherit_fields:
r = check_type(self._inherit_fields[f[i]][2]._type)
data[fpos] = r or False
break
if isinstance(r, (browse_record_list, list)):
first = True
fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
or [], fields)
if fields2 in done:
if [x for x in fields2 if x]:
break
done.append(fields2)
if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
break
for row2 in r:
lines2 = row2._model.__export_row(cr, uid, row2, fields2,
context)
if first:
for fpos2 in range(len(fields)):
if lines2 and lines2[0][fpos2]:
data[fpos2] = lines2[0][fpos2]
if not data[fpos]:
dt = ''
for rr in r:
name_relation = self.pool.get(rr._table_name)._rec_name
if isinstance(rr[name_relation], browse_record):
rr = rr[name_relation]
rr_name = self.pool.get(rr._table_name).name_get(cr, uid, [rr.id], context=context)
rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
dt += tools.ustr(rr_name or '') + ','
data[fpos] = dt[:-1]
break
lines += lines2[1:]
first = False
else:
lines += lines2
break
i += 1
if i == len(f):
if isinstance(r, browse_record):
r = self.pool.get(r._table_name).name_get(cr, uid, [r.id], context=context)
r = r and r[0] and r[0][1] or ''
data[fpos] = tools.ustr(r or '')
return [data] + lines
def export_data(self, cr, uid, ids, fields_to_export, context=None):
"""
Export fields for selected objects
:param cr: database cursor
:param uid: current user id
:param ids: list of ids
:param fields_to_export: list of fields
:param context: context arguments, like lang, time zone
:rtype: dictionary with a *datas* matrix
This method is used when exporting data via client menu
"""
if context is None:
context = {}
cols = self._columns.copy()
for f in self._inherit_fields:
cols.update({f: self._inherit_fields[f][2]})
fields_to_export = map(fix_import_export_id_paths, fields_to_export)
datas = []
for row in self.browse(cr, uid, ids, context):
datas += self.__export_row(cr, uid, row, fields_to_export, context)
return {'datas': datas}
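# Usage sketch (assumption, not from the original source; ids and values are illustrative):
#   result = self.pool.get('res.partner').export_data(
#       cr, uid, [1, 2], ['name', 'country_id/name'], context=context)
#   # result == {'datas': [[u'Partner A', u'Belgium'], [u'Partner B', u'France']]}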
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
"""
.. deprecated:: 7.0
Use :meth:`~load` instead
Import given data in given module
This method is used when importing data via client menu.
Example of fields to import for a sale.order::
.id, (=database_id)
partner_id, (=name_search)
order_line/.id, (=database_id)
order_line/name,
order_line/product_id/id, (=xml id)
order_line/price_unit,
order_line/product_uom_qty,
order_line/product_uom/id (=xml_id)
This method returns a 4-tuple with the following structure::
(return_code, errored_resource, error_message, unused)
* The first item is a return code, it is ``-1`` in case of
import error, or the last imported row number in case of success
* The second item contains the record data dict that failed to import
in case of error, otherwise it's 0
* The third item contains an error message string in case of error,
otherwise it's 0
* The last item is currently unused, with no specific semantics
:param fields: list of fields to import
:param datas: data to import
:param mode: 'init' or 'update' for record creation
:param current_module: module name
:param noupdate: flag for record creation
:param filename: optional file to store partial import state for recovery
:returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
:rtype: (int, dict or 0, str or 0, str or 0)
"""
context = dict(context) if context is not None else {}
context['_import_current_module'] = current_module
fields = map(fix_import_export_id_paths, fields)
ir_model_data_obj = self.pool.get('ir.model.data')
def log(m):
if m['type'] == 'error':
raise Exception(m['message'])
if config.get('import_partial') and filename:
with open(config.get('import_partial'), 'rb') as partial_import_file:
data = pickle.load(partial_import_file)
position = data.get(filename, 0)
position = 0
try:
for res_id, xml_id, res, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, datas,
context=context, log=log),
context=context, log=log):
ir_model_data_obj._update(cr, uid, self._name,
current_module, res, mode=mode, xml_id=xml_id,
noupdate=noupdate, res_id=res_id, context=context)
position = info.get('rows', {}).get('to', 0) + 1
if config.get('import_partial') and filename and (not (position%100)):
with open(config.get('import_partial'), 'rb') as partial_import:
data = pickle.load(partial_import)
data[filename] = position
with open(config.get('import_partial'), 'wb') as partial_import:
pickle.dump(data, partial_import)
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
cr.commit()
except Exception, e:
cr.rollback()
return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
return position, 0, 0, 0
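# Usage sketch for this deprecated API (assumption, not from the original source):
#   fields = ['id', 'name']
#   datas = [['__import__.partner_a', 'Partner A'],
#            ['__import__.partner_b', 'Partner B']]
#   code, record, message, _unused = self.pool.get('res.partner').import_data(
#       cr, uid, fields, datas, mode='init', current_module='__import__')
#   # `code` is the last imported row number on success, -1 with `message` set on error.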
def load(self, cr, uid, fields, data, context=None):
"""
Attempts to load the data matrix, and returns a list of ids (or
``False`` if there was an error and no id could be generated) and a
list of messages.
The ids are those of the records created and saved (in database), in
the same order they were extracted from the file. They can be passed
directly to :meth:`~read`
:param fields: list of fields to import, at the same index as the corresponding data
:type fields: list(str)
:param data: row-major matrix of data to import
:type data: list(list(str))
:param dict context:
:returns: {ids: list(int)|False, messages: [Message]}
"""
cr.execute('SAVEPOINT model_load')
messages = []
fields = map(fix_import_export_id_paths, fields)
# clear_caches() returns None, so keep the model reference and the cache reset separate
ModelData = self.pool['ir.model.data']
ModelData.clear_caches()
fg = self.fields_get(cr, uid, context=context)
mode = 'init'
current_module = ''
noupdate = False
ids = []
for id, xid, record, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, data,
context=context, log=messages.append),
context=context, log=messages.append):
try:
cr.execute('SAVEPOINT model_load_save')
except psycopg2.InternalError, e:
# broken transaction, exit and hope the source error was
# already logged
if not any(message['type'] == 'error' for message in messages):
messages.append(dict(info, type='error',message=
u"Unknown database error: '%s'" % e))
break
try:
ids.append(ModelData._update(cr, uid, self._name,
current_module, record, mode=mode, xml_id=xid,
noupdate=noupdate, res_id=id, context=context))
cr.execute('RELEASE SAVEPOINT model_load_save')
except psycopg2.Warning, e:
messages.append(dict(info, type='warning', message=str(e)))
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except psycopg2.Error, e:
messages.append(dict(
info, type='error',
**PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
# Failed to write, log to messages, rollback savepoint (to
# avoid broken transaction) and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
if any(message['type'] == 'error' for message in messages):
cr.execute('ROLLBACK TO SAVEPOINT model_load')
ids = False
return {'ids': ids, 'messages': messages}
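# Usage sketch (assumption, not from the original source): the row-major matrix mirrors
# a CSV file whose header provides the `fields` argument.
#   result = self.pool.get('res.partner').load(cr, uid,
#       ['name', 'country_id/id'],
#       [['Partner A', 'base.be'], ['Partner B', 'base.fr']],
#       context=context)
#   # result == {'ids': [<new ids>], 'messages': []} on success, or
#   # {'ids': False, 'messages': [{'type': 'error', ...}]} when a row failed.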
def _extract_records(self, cr, uid, fields_, data,
context=None, log=lambda a: None):
""" Generates record dicts from the data sequence.
The result is a generator of dicts mapping field names to raw
(unconverted, unvalidated) values.
For relational fields, if sub-fields were provided the value will be
a list of sub-records
The following sub-fields may be set on the record (by key):
* None is the name_get for the record (to use with name_create/name_search)
* "id" is the External ID for the record
* ".id" is the Database ID for the record
"""
columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
# Fake columns to avoid special cases in extractor
columns[None] = fields.char('rec_name')
columns['id'] = fields.char('External ID')
columns['.id'] = fields.integer('Database ID')
# m2o fields can't span multiple lines, so exclude them from the
# is_relational rows filter, but special-case them later on so they are
# handled with the relational fields (as they can have subfields)
is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
get_o2m_values = itemgetter_tuple(
[index for index, field in enumerate(fields_)
if columns[field[0]]._type == 'one2many'])
get_nono2m_values = itemgetter_tuple(
[index for index, field in enumerate(fields_)
if columns[field[0]]._type != 'one2many'])
# Checks if the provided row has any non-empty non-relational field
def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
return any(g(row)) and not any(f(row))
index = 0
while True:
if index >= len(data): return
row = data[index]
# copy non-relational fields to record dict
record = dict((field[0], value)
for field, value in itertools.izip(fields_, row)
if not is_relational(field[0]))
# Get all following rows which have relational values attached to
# the current record (no non-relational values)
record_span = itertools.takewhile(
only_o2m_values, itertools.islice(data, index + 1, None))
# stitch record row back on for relational fields
record_span = list(itertools.chain([row], record_span))
for relfield in set(
field[0] for field in fields_
if is_relational(field[0])):
column = columns[relfield]
# FIXME: how to not use _obj without relying on fields_get?
Model = self.pool[column._obj]
# get only cells for this sub-field, should be strictly
# non-empty, field path [None] is for name_get column
indices, subfields = zip(*((index, field[1:] or [None])
for index, field in enumerate(fields_)
if field[0] == relfield))
# return all rows which have at least one value for the
# subfields of relfield
relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
record[relfield] = [subrecord
for subrecord, _subinfo in Model._extract_records(
cr, uid, subfields, relfield_data,
context=context, log=log)]
yield record, {'rows': {
'from': index,
'to': index + len(record_span) - 1
}}
index += len(record_span)
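# Worked example (assumption, not from the original source), with 'order_line' the only
# one2many among the requested fields:
#   fields_ = [['name'], ['order_line', 'name']]
#   data = [['SO001', 'Line 1'],
#           ['',      'Line 2'],
#           ['SO002', 'Line A']]
# The second row carries only one2many values, so it is folded into the first record:
#   ({'name': 'SO001', 'order_line': [{'name': 'Line 1'}, {'name': 'Line 2'}]},
#    {'rows': {'from': 0, 'to': 1}})
#   ({'name': 'SO002', 'order_line': [{'name': 'Line A'}]},
#    {'rows': {'from': 2, 'to': 2}})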
def _convert_records(self, cr, uid, records,
context=None, log=lambda a: None):
""" Converts records from the source iterable (recursive dicts of
strings) into forms which can be written to the database (via
self.create or (ir.model.data)._update)
:returns: a generator of 4-tuples (id, xid, record, info)
:rtype: iter((int|None, str|None, dict, dict))
"""
if context is None: context = {}
Converter = self.pool['ir.fields.converter']
columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
Translation = self.pool['ir.translation']
field_names = dict(
(f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
context.get('lang'))
or column.string))
for f, column in columns.iteritems())
convert = Converter.for_model(cr, uid, self, context=context)
def _log(base, field, exception):
type = 'warning' if isinstance(exception, Warning) else 'error'
# log the logical (not human-readable) field name for automated
# processing of the response, but inject the human-readable name in the message
record = dict(base, type=type, field=field,
message=unicode(exception.args[0]) % base)
if len(exception.args) > 1 and exception.args[1]:
record.update(exception.args[1])
log(record)
stream = CountingStream(records)
for record, extras in stream:
dbid = False
xid = False
# name_get/name_create
if None in record: pass
# xid
if 'id' in record:
xid = record['id']
# dbid
if '.id' in record:
try:
dbid = int(record['.id'])
except ValueError:
# in case of overridden id column
dbid = record['.id']
if not self.search(cr, uid, [('id', '=', dbid)], context=context):
log(dict(extras,
type='error',
record=stream.index,
field='.id',
message=_(u"Unknown database identifier '%s'") % dbid))
dbid = False
converted = convert(record, lambda field, err:\
_log(dict(extras, record=stream.index, field=field_names[field]), field, err))
yield dbid, xid, converted, dict(extras, record=stream.index)
def get_invalid_fields(self, cr, uid):
return list(self._invalids)
def _validate(self, cr, uid, ids, context=None):
context = context or {}
lng = context.get('lang')
trans = self.pool.get('ir.translation')
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
# We don't pass around the context here: validation code
# must always yield the same results.
if not fun(self, cr, uid, ids):
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
if hasattr(msg, '__call__'):
tmp_msg = msg(self, cr, uid, ids, context=context)
if isinstance(tmp_msg, tuple):
tmp_msg, params = tmp_msg
translated_msg = tmp_msg % params
else:
translated_msg = tmp_msg
else:
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
error_msgs.append(
_("Error occurred while validating the field(s) %s: %s") % (','.join(fields), translated_msg)
)
self._invalids.update(fields)
if error_msgs:
raise except_orm('ValidateError', '\n'.join(error_msgs))
else:
self._invalids.clear()
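# Sketch of the _constraints format consumed above (assumption, common OpenERP 7 idiom,
# the field and helper names are hypothetical):
#   def _check_amount(self, cr, uid, ids, context=None):
#       return all(rec.amount >= 0 for rec in self.browse(cr, uid, ids, context=context))
#   _constraints = [
#       (_check_amount, 'The amount must be positive.', ['amount']),
#   ]
# Note that _validate() calls the function without a context, so the check must not
# rely on one.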
def default_get(self, cr, uid, fields_list, context=None):
"""
Returns default values for the fields in fields_list.
:param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
:type fields_list: list
:param context: optional context dictionary - it may contain keys for specifying certain options
like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
or override a default value for a field.
A special ``bin_size`` boolean flag may also be passed in the context to request the
value of all fields.binary columns to be returned as the size of the binary instead of its
contents. This can also be selectively overridden by passing a field-specific flag
in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
:return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
"""
# trigger view init hook
self.view_init(cr, uid, fields_list, context)
if not context:
context = {}
defaults = {}
# get the default values for the inherited fields
for t in self._inherits.keys():
defaults.update(self.pool.get(t).default_get(cr, uid, fields_list,
context))
# get the default values defined in the object
for f in fields_list:
if f in self._defaults:
if callable(self._defaults[f]):
defaults[f] = self._defaults[f](self, cr, uid, context)
else:
defaults[f] = self._defaults[f]
fld_def = ((f in self._columns) and self._columns[f]) \
or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
or False
if isinstance(fld_def, fields.property):
property_obj = self.pool.get('ir.property')
prop_value = property_obj.get(cr, uid, f, self._name, context=context)
if prop_value:
if isinstance(prop_value, (browse_record, browse_null)):
defaults[f] = prop_value.id
else:
defaults[f] = prop_value
else:
if f not in defaults:
defaults[f] = False
# get the default values set by the user and override the default
# values defined in the object
ir_values_obj = self.pool.get('ir.values')
res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
for id, field, field_value in res:
if field in fields_list:
fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
if fld_def._type == 'many2one':
obj = self.pool.get(fld_def._obj)
if not obj.search(cr, uid, [('id', '=', field_value or False)]):
continue
if fld_def._type == 'many2many':
obj = self.pool.get(fld_def._obj)
field_value2 = []
for i in range(len(field_value or [])):
if not obj.search(cr, uid, [('id', '=',
field_value[i])]):
continue
field_value2.append(field_value[i])
field_value = field_value2
if fld_def._type == 'one2many':
obj = self.pool.get(fld_def._obj)
field_value2 = []
for i in range(len(field_value or [])):
field_value2.append({})
for field2 in field_value[i]:
if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
obj2 = self.pool.get(obj._columns[field2]._obj)
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
obj2 = self.pool.get(obj._inherit_fields[field2][2]._obj)
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
# TODO add test for many2many and one2many
field_value2[i][field2] = field_value[i][field2]
field_value = field_value2
defaults[field] = field_value
# get the default values from the context
for key in context or {}:
if key.startswith('default_') and (key[8:] in fields_list):
defaults[key[8:]] = context[key]
return defaults
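# Usage sketch (assumption, not from the original source): context keys of the form
# ``default_<field>`` take precedence over model-level and ir.values defaults.
#   ctx = dict(context or {}, default_name='Draft order')
#   defaults = self.default_get(cr, uid, ['name', 'partner_id'], context=ctx)
#   # defaults['name'] == 'Draft order'; defaults['partner_id'] comes from _defaults,
#   # ir.values, or is False when nothing defines it.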
def fields_get_keys(self, cr, user, context=None):
res = self._columns.keys()
# TODO I believe this loop can be replaced by
# res.extend(self._inherit_fields.keys())
for parent in self._inherits:
res.extend(self.pool.get(parent).fields_get_keys(cr, user, context))
return res
def _rec_name_fallback(self, cr, uid, context=None):
rec_name = self._rec_name
if rec_name not in self._columns:
rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
return rec_name
#
# Overload this method if you need a window title which depends on the context
#
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
return False
def user_has_groups(self, cr, uid, groups, context=None):
"""Return true if the user is at least member of one of the groups
in groups_str. Typically used to resolve ``groups`` attribute
in view and model definitions.
:param str groups: comma-separated list of fully-qualified group
external IDs, e.g.: ``base.group_user,base.group_system``
:return: True if the current user is a member of one of the
given groups
"""
return any([self.pool.get('res.users').has_group(cr, uid, group_ext_id)
for group_ext_id in groups.split(',')])
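# Usage sketch (assumption, not from the original source):
#   if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
#       pass  # the user belongs to at least one of the two groups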
def __view_look_dom(self, cr, user, node, view_id, in_tree_view, model_fields, context=None):
"""Return the description of the fields in the node.
In a normal call to this method, node is a complete view architecture
but it is actually possible to give some sub-node (this is used so
that the method can call itself recursively).
Originally, the field descriptions are drawn from the node itself.
But there is now some code calling fields_get() in order to merge some
of that information into the architecture.
"""
if context is None:
context = {}
result = False
fields = {}
children = True
modifiers = {}
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
def check_group(node):
"""Apply group restrictions, may be set at view level or model level::
* at view level this means the element should be made invisible to
people who are not members
* at model level (exclusively for fields, obviously), this means
the field should be completely removed from the view, as it is
completely unavailable for non-members
:return: True if field should be included in the result of fields_view_get
"""
if node.tag == 'field' and node.get('name') in self._all_columns:
column = self._all_columns[node.get('name')].column
if column.groups and not self.user_has_groups(cr, user,
groups=column.groups,
context=context):
node.getparent().remove(node)
fields.pop(node.get('name'), None)
# no point processing view-level ``groups`` anymore, return
return False
if node.get('groups'):
can_see = self.user_has_groups(cr, user,
groups=node.get('groups'),
context=context)
if not can_see:
node.set('invisible', '1')
modifiers['invisible'] = True
if 'attrs' in node.attrib:
del(node.attrib['attrs']) #avoid making field visible later
del(node.attrib['groups'])
return True
if node.tag in ('field', 'node', 'arrow'):
if node.get('object'):
attrs = {}
views = {}
xml = "<form>"
for f in node:
if f.tag == 'field':
xml += etree.tostring(f, encoding="utf-8")
xml += "</form>"
new_xml = etree.fromstring(encode(xml))
ctx = context.copy()
ctx['base_model_name'] = self._name
xarch, xfields = self.pool.get(node.get('object')).__view_look_dom_arch(cr, user, new_xml, view_id, ctx)
views['form'] = {
'arch': xarch,
'fields': xfields
}
attrs = {'views': views}
fields = xfields
if node.get('name'):
attrs = {}
try:
if node.get('name') in self._columns:
column = self._columns[node.get('name')]
else:
column = self._inherit_fields[node.get('name')][2]
except Exception:
column = False
if column:
relation = self.pool.get(column._obj)
children = False
views = {}
for f in node:
if f.tag in ('form', 'tree', 'graph', 'kanban'):
node.remove(f)
ctx = context.copy()
ctx['base_model_name'] = self._name
xarch, xfields = relation.__view_look_dom_arch(cr, user, f, view_id, ctx)
views[str(f.tag)] = {
'arch': xarch,
'fields': xfields
}
attrs = {'views': views}
if node.get('widget') and node.get('widget') == 'selection':
# Prepare the cached selection list for the client. This needs to be
# done even when the field is invisible to the current user, because
# other events could need to change its value to any of the selectable ones
# (such as on_change events, refreshes, etc.)
# If domain and context are strings, we keep them for client-side, otherwise
# we evaluate them server-side to consider them when generating the list of
# possible values
# TODO: find a way to remove this hack, by allowing dynamic domains
dom = []
if column._domain and not isinstance(column._domain, basestring):
dom = list(column._domain)
dom += eval(node.get('domain', '[]'), {'uid': user, 'time': time})
search_context = dict(context)
if column._context and not isinstance(column._context, basestring):
search_context.update(column._context)
attrs['selection'] = relation._name_search(cr, user, '', dom, context=search_context, limit=None, name_get_uid=1)
if (node.get('required') and not int(node.get('required'))) or not column.required:
attrs['selection'].append((False, ''))
fields[node.get('name')] = attrs
field = model_fields.get(node.get('name'))
if field:
transfer_field_to_modifiers(field, modifiers)
elif node.tag in ('form', 'tree'):
result = self.view_header_get(cr, user, False, node.tag, context)
if result:
node.set('string', result)
in_tree_view = node.tag == 'tree'
elif node.tag == 'calendar':
for additional_field in ('date_start', 'date_delay', 'date_stop', 'color'):
if node.get(additional_field):
fields[node.get(additional_field)] = {}
if not check_group(node):
# node must be removed, no need to proceed further with its children
return fields
# The view architecture overrides the Python model.
# Get the attrs before they are (possibly) deleted by check_group below
transfer_node_to_modifiers(node, modifiers, context, in_tree_view)
# TODO remove the attrs counterpart in modifiers when invisible is true?
# translate view
if 'lang' in context:
if node.text and node.text.strip():
trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.text.strip())
if trans:
node.text = node.text.replace(node.text.strip(), trans)
if node.tail and node.tail.strip():
trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.tail.strip())
if trans:
node.tail = node.tail.replace(node.tail.strip(), trans)
if node.get('string') and not result:
trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], node.get('string'))
if trans == node.get('string') and ('base_model_name' in context):
# If translation is same as source, perhaps we'd have more luck with the alternative model name
# (in case we are in a mixed situation, such as an inherited view where parent_view.model != model)
trans = self.pool.get('ir.translation')._get_source(cr, user, context['base_model_name'], 'view', context['lang'], node.get('string'))
if trans:
node.set('string', trans)
for attr_name in ('confirm', 'sum', 'avg', 'help', 'placeholder'):
attr_value = node.get(attr_name)
if attr_value:
trans = self.pool.get('ir.translation')._get_source(cr, user, self._name, 'view', context['lang'], attr_value)
if trans:
node.set(attr_name, trans)
for f in node:
if children or (node.tag == 'field' and f.tag in ('filter','separator')):
fields.update(self.__view_look_dom(cr, user, f, view_id, in_tree_view, model_fields, context))
transfer_modifiers_to_node(modifiers, node)
return fields
def _disable_workflow_buttons(self, cr, user, node):
""" Set the buttons in node to readonly if the user can't activate them. """
if user == 1:
# admin user can always activate workflow buttons
return node
# TODO handle the case of more than one workflow for a model or multiple
# transitions with different groups and same signal
usersobj = self.pool.get('res.users')
buttons = (n for n in node.getiterator('button') if n.get('type') != 'object')
for button in buttons:
user_groups = usersobj.read(cr, user, [user], ['groups_id'])[0]['groups_id']
cr.execute("""SELECT DISTINCT t.group_id
FROM wkf
INNER JOIN wkf_activity a ON a.wkf_id = wkf.id
INNER JOIN wkf_transition t ON (t.act_to = a.id)
WHERE wkf.osv = %s
AND t.signal = %s
AND t.group_id is NOT NULL
""", (self._name, button.get('name')))
group_ids = [x[0] for x in cr.fetchall() if x[0]]
can_click = not group_ids or bool(set(user_groups).intersection(group_ids))
button.set('readonly', str(int(not can_click)))
return node
def __view_look_dom_arch(self, cr, user, node, view_id, context=None):
""" Return an architecture and a description of all the fields.
The field description combines the result of fields_get() and
__view_look_dom().
:param node: the architecture as an etree
:return: a tuple (arch, fields) where arch is the given node as a
string and fields is the description of all the fields.
"""
fields = {}
if node.tag == 'diagram':
if node.getchildren()[0].tag == 'node':
node_model = self.pool.get(node.getchildren()[0].get('object'))
node_fields = node_model.fields_get(cr, user, None, context)
fields.update(node_fields)
if not node.get("create") and not node_model.check_access_rights(cr, user, 'create', raise_exception=False):
node.set("create", 'false')
if node.getchildren()[1].tag == 'arrow':
arrow_fields = self.pool.get(node.getchildren()[1].get('object')).fields_get(cr, user, None, context)
fields.update(arrow_fields)
else:
fields = self.fields_get(cr, user, None, context)
fields_def = self.__view_look_dom(cr, user, node, view_id, False, fields, context=context)
node = self._disable_workflow_buttons(cr, user, node)
if node.tag in ('kanban', 'tree', 'form', 'gantt'):
for action, operation in (('create', 'create'), ('delete', 'unlink'), ('edit', 'write')):
if not node.get(action) and not self.check_access_rights(cr, user, operation, raise_exception=False):
node.set(action, 'false')
arch = etree.tostring(node, encoding="utf-8").replace('\t', '')
for k in fields.keys():
if k not in fields_def:
del fields[k]
for field in fields_def:
if field == 'id':
# sometimes, the view may contain the (invisible) field 'id' needed for a domain (when 2 objects have cross references)
fields['id'] = {'readonly': True, 'type': 'integer', 'string': 'ID'}
elif field in fields:
fields[field].update(fields_def[field])
else:
cr.execute('select name, model from ir_ui_view where (id=%s or inherit_id=%s) and arch like %s', (view_id, view_id, '%%%s%%' % field))
res = cr.fetchall()[:]
model = res[0][1]
res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
msg = "\n * ".join([r[0] for r in res])
msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model"
_logger.error(msg)
raise except_orm('View error', msg)
return arch, fields
def _get_default_form_view(self, cr, user, context=None):
""" Generates a default single-line form view using all fields
of the current model except the m2m and o2m ones.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a form view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('form', string=self._description)
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
if descriptor['type'] in ('one2many', 'many2many'):
continue
etree.SubElement(view, 'field', name=field)
if descriptor['type'] == 'text':
etree.SubElement(view, 'newline')
return view
def _get_default_search_view(self, cr, user, context=None):
""" Generates a single-field search view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a search view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('search', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_tree_view(self, cr, user, context=None):
""" Generates a single-field tree view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('tree', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
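# Illustrative sketch (not part of the original code): the default search and
# tree views generated above serialize to a minimal architecture along the
# lines of the following, for a model whose record name field is 'name':
#
#   <tree string="Model Description">
#       <field name="name"/>
#   </tree>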
def _get_default_calendar_view(self, cr, user, context=None):
""" Generates a default calendar view by trying to infer
calendar fields from a number of pre-set attribute names
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a calendar view
:rtype: etree._Element
"""
def set_first_of(seq, in_, to):
"""Sets the first value of ``seq`` also found in ``in_`` to
the ``to`` attribute of the view being closed over.
Returns whether it found a suitable value (and set it on
the attribute) or not
"""
for item in seq:
if item in in_:
view.set(to, item)
return True
return False
view = etree.Element('calendar', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
if self._date_name not in self._columns:
date_found = False
for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
if dt in self._columns:
self._date_name = dt
date_found = True
break
if not date_found:
raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
view.set('date_start', self._date_name)
set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
self._columns, 'color')
if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
self._columns, 'date_stop'):
if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
self._columns, 'date_delay'):
raise except_orm(
_('Invalid Object Architecture!'),
_("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
return view
#
# if view_id, view_type is not required
#
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
"""
Get the detailed composition of the requested view like fields, model, view architecture
:param cr: database cursor
:param user: current user id
:param view_id: id of the view or None
:param view_type: type of the view to return if view_id is None ('form', 'tree', ...)
:param context: context arguments, like lang, time zone
:param toolbar: true to include contextual actions
:param submenu: deprecated
:return: dictionary describing the composition of the requested view (including inherited views and extensions)
:raise AttributeError:
* if the inheriting view specifies a position other than 'before', 'after', 'inside' or 'replace'
* if some tag other than 'position' is found in the parent view
:raise Invalid ArchitectureError: if a view type other than form, tree, calendar, search etc. is defined on the structure
"""
if context is None:
context = {}
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
def raise_view_error(error_msg, child_view_id):
view, child_view = self.pool.get('ir.ui.view').browse(cr, user, [view_id, child_view_id], context)
error_msg = error_msg % {'parent_xml_id': view.xml_id}
raise AttributeError("View definition error for inherited view '%s' on model '%s': %s"
% (child_view.xml_id, self._name, error_msg))
def locate(source, spec):
""" Locate a node in a source (parent) architecture.
Given a complete source (parent) architecture (i.e. the field
`arch` in a view), and a 'spec' node (a node in an inheriting
view that specifies the location in the source view of what
should be changed), return (if it exists) the node in the
source view matching the specification.
:param source: a parent architecture to modify
:param spec: a modifying node in an inheriting view
:return: a node in the source matching the spec
"""
if spec.tag == 'xpath':
nodes = source.xpath(spec.get('expr'))
return nodes[0] if nodes else None
elif spec.tag == 'field':
# Only compare the field name: a field can be only once in a given view
# at a given level (and for multilevel expressions, we should use xpath
# inheritance spec anyway).
for node in source.getiterator('field'):
if node.get('name') == spec.get('name'):
return node
return None
for node in source.getiterator(spec.tag):
if isinstance(node, SKIPPED_ELEMENT_TYPES):
continue
if all(node.get(attr) == spec.get(attr) \
for attr in spec.attrib
if attr not in ('position','version')):
# Version spec should match parent's root element's version
if spec.get('version') and spec.get('version') != source.get('version'):
return None
return node
return None
def apply_inheritance_specs(source, specs_arch, inherit_id=None):
""" Apply an inheriting view.
Apply to a source architecture all the spec nodes (i.e. nodes
describing where and what changes to apply to some parent
architecture) given by an inheriting view.
:param source: a parent architecture to modify
:param specs_arch: a modifying architecture in an inheriting view
:param inherit_id: the database id of the inheriting view
:return: a modified source where the specs are applied
"""
specs_tree = etree.fromstring(encode(specs_arch))
# Queue of specification nodes (i.e. nodes describing where and
# what changes to apply to some parent architecture).
specs = [specs_tree]
while len(specs):
spec = specs.pop(0)
if isinstance(spec, SKIPPED_ELEMENT_TYPES):
continue
if spec.tag == 'data':
# queue the children of the current wrapper node (not always the root)
specs += [c for c in spec]
continue
node = locate(source, spec)
if node is not None:
pos = spec.get('position', 'inside')
if pos == 'replace':
if node.getparent() is None:
source = copy.deepcopy(spec[0])
else:
for child in spec:
node.addprevious(child)
node.getparent().remove(node)
elif pos == 'attributes':
for child in spec.getiterator('attribute'):
attribute = (child.get('name'), child.text and child.text.encode('utf8') or None)
if attribute[1]:
node.set(attribute[0], attribute[1])
else:
del(node.attrib[attribute[0]])
else:
sib = node.getnext()
for child in spec:
if pos == 'inside':
node.append(child)
elif pos == 'after':
if sib is None:
node.addnext(child)
node = child
else:
sib.addprevious(child)
elif pos == 'before':
node.addprevious(child)
else:
raise_view_error("Invalid position value: '%s'" % pos, inherit_id)
else:
attrs = ''.join([
' %s="%s"' % (attr, spec.get(attr))
for attr in spec.attrib
if attr != 'position'
])
tag = "<%s%s>" % (spec.tag, attrs)
if spec.get('version') and spec.get('version') != source.get('version'):
raise_view_error("Mismatching view API version for element '%s': %r vs %r in parent view '%%(parent_xml_id)s'" % \
(tag, spec.get('version'), source.get('version')), inherit_id)
raise_view_error("Element '%s' not found in parent view '%%(parent_xml_id)s'" % tag, inherit_id)
return source
def apply_view_inheritance(cr, user, source, inherit_id):
""" Apply all the (directly and indirectly) inheriting views.
:param source: a parent architecture to modify (with parent
modifications already applied)
:param inherit_id: the database view_id of the parent view
:return: a modified source where all the modifying architecture
are applied
"""
sql_inherit = self.pool.get('ir.ui.view').get_inheriting_views_arch(cr, user, inherit_id, self._name, context=context)
for (view_arch, view_id) in sql_inherit:
source = apply_inheritance_specs(source, view_arch, view_id)
source = apply_view_inheritance(cr, user, source, view_id)
return source
result = {'type': view_type, 'model': self._name}
sql_res = False
parent_view_model = None
view_ref = context.get(view_type + '_view_ref')
# Search for a root (i.e. without any parent) view.
while True:
if view_ref and not view_id:
if '.' in view_ref:
module, view_ref = view_ref.split('.', 1)
cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
view_ref_res = cr.fetchone()
if view_ref_res:
view_id = view_ref_res[0]
if view_id:
cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model
FROM ir_ui_view
WHERE id=%s""", (view_id,))
else:
cr.execute("""SELECT arch,name,field_parent,id,type,inherit_id,model
FROM ir_ui_view
WHERE model=%s AND type=%s AND inherit_id IS NULL
ORDER BY priority""", (self._name, view_type))
sql_res = cr.dictfetchone()
if not sql_res:
break
view_id = sql_res['inherit_id'] or sql_res['id']
parent_view_model = sql_res['model']
if not sql_res['inherit_id']:
break
# if a view was found
if sql_res:
source = etree.fromstring(encode(sql_res['arch']))
result.update(
arch=apply_view_inheritance(cr, user, source, sql_res['id']),
type=sql_res['type'],
view_id=sql_res['id'],
name=sql_res['name'],
field_parent=sql_res['field_parent'] or False)
else:
# otherwise, build some kind of default view
try:
view = getattr(self, '_get_default_%s_view' % view_type)(
cr, user, context)
except AttributeError:
# no default view generator exists for this view type (e.g. 'graph')
raise except_orm(_('Invalid Architecture!'), _("There is no view of type '%s' defined for the structure!") % view_type)
result.update(
arch=view,
name='default',
field_parent=False,
view_id=0)
if parent_view_model != self._name:
ctx = context.copy()
ctx['base_model_name'] = parent_view_model
else:
ctx = context
xarch, xfields = self.__view_look_dom_arch(cr, user, result['arch'], view_id, context=ctx)
result['arch'] = xarch
result['fields'] = xfields
if toolbar:
def clean(x):
x = x[2]
for key in ('report_sxw_content', 'report_rml_content',
'report_sxw', 'report_rml',
'report_sxw_content_data', 'report_rml_content_data'):
if key in x:
del x[key]
return x
ir_values_obj = self.pool.get('ir.values')
resprint = ir_values_obj.get(cr, user, 'action',
'client_print_multi', [(self._name, False)], False,
context)
resaction = ir_values_obj.get(cr, user, 'action',
'client_action_multi', [(self._name, False)], False,
context)
resrelate = ir_values_obj.get(cr, user, 'action',
'client_action_relate', [(self._name, False)], False,
context)
resaction = [clean(action) for action in resaction
if view_type == 'tree' or not action[2].get('multi')]
resprint = [clean(print_) for print_ in resprint
if view_type == 'tree' or not print_[2].get('multi')]
#When multi="True" set it will display only in More of the list view
resrelate = [clean(action) for action in resrelate
if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
for x in itertools.chain(resprint, resaction, resrelate):
x['string'] = x['name']
result['toolbar'] = {
'print': resprint,
'action': resaction,
'relate': resrelate
}
return result
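# Hedged usage sketch (model name is hypothetical, not from the original
# code): a typical call and the keys of the resulting dictionary.
#
#   view = self.pool.get('res.partner').fields_view_get(cr, uid, view_type='form')
#   # view['arch']   -> final architecture as a string, inheritance applied
#   # view['fields'] -> description of the fields used by the architecture
#   # plus view['type'], view['view_id'], view['name'], view['field_parent']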
_view_look_dom_arch = __view_look_dom_arch
def search_count(self, cr, user, args, context=None):
if not context:
context = {}
res = self.search(cr, user, args, context=context, count=True)
if isinstance(res, list):
return len(res)
return res
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
"""
Search for records based on a search domain.
:param cr: database cursor
:param user: current user id
:param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
:param offset: optional number of results to skip in the returned values (default: 0)
:param limit: optional max number of records to return (default: **None**)
:param order: optional columns to sort by (default: self._order=id )
:param context: optional context arguments, like lang, time zone
:type context: dictionary
:param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
:return: id or list of ids of records matching the criteria
:rtype: integer or list of integers
:raise AccessError: * if user tries to bypass access rules for read on the requested object.
**Expressing a search domain (args)**
Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:
* **field_name** must be a valid name of a field of the object model, possibly following many-to-one relationships using dot-notation, e.g. 'street' or 'partner_id.country' are valid values.
* **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
The semantics of most of these operators are obvious.
The ``child_of`` operator will look for records that are children or grand-children of a given record,
according to the semantics of this model (i.e. following the relationship field named by
``self._parent_name``, by default ``parent_id``).
* **value** must be a valid value to compare with the values of **field_name**, depending on its type.
Domain criteria can be combined using 3 logical operators that can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
Be very careful about this when you combine them the first time.
Here is an example of searching for Partners named *ABC* from Belgium and Germany whose language is not english ::
[('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]
The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::
(name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
"""
return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
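# Hedged usage sketch (field names are hypothetical): searching with an
# explicit prefix '|' operator, then counting records matching the same domain.
#
#   domain = ['|', ('state', '=', 'draft'), ('state', '=', 'open')]
#   ids = self.search(cr, uid, domain, limit=80, order='name')
#   total = self.search(cr, uid, domain, count=True)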
def name_get(self, cr, user, ids, context=None):
"""Returns the preferred display value (text representation) for the records with the
given ``ids``. By default this will be the value of the ``name`` column, unless
the model implements a custom behavior.
Can sometimes be seen as the inverse function of :meth:`~.name_search`, but it is not
guaranteed to be.
:rtype: list(tuple)
:return: list of pairs ``(id,text_repr)`` for all records with the given ``ids``.
"""
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
if self._rec_name in self._all_columns:
rec_name_column = self._all_columns[self._rec_name].column
return [(r['id'], rec_name_column.as_display_name(cr, user, self, r[self._rec_name], context=context))
for r in self.read(cr, user, ids, [self._rec_name],
load='_classic_write', context=context)]
return [(id, "%s,%s" % (self._name, id)) for id in ids]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
"""Search for records that have a display name matching the given ``name`` pattern if compared
with the given ``operator``, while also matching the optional search domain (``args``).
This is used for example to provide suggestions based on a partial value for a relational
field.
This can sometimes be seen as the inverse function of :meth:`~.name_get`, but it is not
guaranteed to be.
This method is equivalent to calling :meth:`~.search` with a search domain based on ``name``
and then :meth:`~.name_get` on the result of the search.
:param list args: optional search domain (see :meth:`~.search` for syntax),
specifying further restrictions
:param str operator: domain operator for matching the ``name`` pattern, such as ``'like'``
or ``'='``.
:param int limit: optional max number of records to return
:rtype: list
:return: list of pairs ``(id,text_repr)`` for all matching records.
"""
return self._name_search(cr, user, name, args, operator, context, limit)
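# Hedged usage sketch (field names are hypothetical): restricting the matches
# with an extra domain, as a many2one widget would do for autocompletion.
#
#   self.name_search(cr, uid, name='Agro',
#                    args=[('customer', '=', True)], operator='ilike', limit=8)
#   # -> [(1, 'Agrolait'), ...]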
def name_create(self, cr, uid, name, context=None):
"""Creates a new record by calling :meth:`~.create` with only one
value provided: the name of the new record (``_rec_name`` field).
The new record will also be initialized with any default values applicable
to this model, or provided through the context. The usual behavior of
:meth:`~.create` applies.
Similarly, this method may raise an exception if the model has multiple
required fields and some do not have default values.
:param name: name of the record to create
:rtype: tuple
:return: the :meth:`~.name_get` pair value for the newly-created record.
"""
rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
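# Hedged usage sketch: quick record creation from a single display name,
# returning the same pair shape as name_get().
#
#   pair = self.name_create(cr, uid, 'New Partner')
#   # -> (new_id, 'New Partner')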
# private implementation of name_search, allows passing a dedicated user for the name_get part to
# solve some access rights issues
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
if context is None:
context = {}
args = args[:]
# optimize out the default criterion of ``ilike ''`` that matches everything
if not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
res = self.name_get(cr, access_rights_uid, ids, context)
return res
def read_string(self, cr, uid, id, langs, fields=None, context=None):
res = {}
res2 = {}
self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
if not fields:
fields = self._columns.keys() + self._inherit_fields.keys()
#FIXME: collect all calls to _get_source into one SQL call.
for lang in langs:
res[lang] = {'code': lang}
for f in fields:
if f in self._columns:
res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
if res_trans:
res[lang][f] = res_trans
else:
res[lang][f] = self._columns[f].string
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), fields)
res2 = self.pool.get(table).read_string(cr, uid, id, langs, cols, context)
for lang in res2:
if lang in res:
res[lang]['code'] = lang
for f in res2[lang]:
res[lang][f] = res2[lang][f]
return res
def write_string(self, cr, uid, id, langs, vals, context=None):
self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
#FIXME: try to only call the translation in one SQL
for lang in langs:
for field in vals:
if field in self._columns:
src = self._columns[field].string
self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), vals)
if cols:
self.pool.get(table).write_string(cr, uid, id, langs, vals, context)
return True
def _add_missing_default_values(self, cr, uid, values, context=None):
missing_defaults = []
avoid_tables = [] # avoid overriding inherited values when parent is set
for tables, parent_field in self._inherits.items():
if parent_field in values:
avoid_tables.append(tables)
for field in self._columns.keys():
if not field in values:
missing_defaults.append(field)
for field in self._inherit_fields.keys():
if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
missing_defaults.append(field)
if len(missing_defaults):
# override defaults with the provided values, never allow the other way around
defaults = self.default_get(cr, uid, missing_defaults, context)
for dv in defaults:
if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
defaults[dv] = [(6, 0, defaults[dv])]
if (dv in self._columns and self._columns[dv]._type == 'one2many' \
or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
defaults[dv] = [(0, 0, x) for x in defaults[dv]]
defaults.update(values)
values = defaults
return values
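# Illustrative sketch (hypothetical field names): defaults coming back as
# plain id lists or dicts are normalized above into the usual command
# triplets before being merged under the user-provided values.
#
#   {'tag_ids': [1, 2]}         -> {'tag_ids': [(6, 0, [1, 2])]}
#   {'line_ids': [{'qty': 1}]}  -> {'line_ids': [(0, 0, {'qty': 1})]}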
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi``.
"""
try:
getattr(self, '_ormcache')
self._ormcache = {}
self.pool._any_cache_cleared = True
except AttributeError:
pass
def _read_group_fill_results(self, cr, uid, domain, groupby, groupby_list, aggregated_fields,
read_group_result, read_group_order=None, context=None):
"""Helper method for filling in empty groups for all possible values of
the field being grouped by"""
# self._group_by_full should map groupable fields to a method that returns
# a list of all aggregated values that we want to display for this field,
# in the form of a m2o-like pair (key,label).
# This is useful to implement kanban views for instance, where all columns
# should be displayed even if they don't contain any record.
# Grab the list of all groups that should be displayed, including all present groups
present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
read_group_order=read_group_order,
access_rights_uid=openerp.SUPERUSER_ID,
context=context)
result_template = dict.fromkeys(aggregated_fields, False)
result_template[groupby + '_count'] = 0
if groupby_list and len(groupby_list) > 1:
result_template['__context'] = {'group_by': groupby_list[1:]}
# Merge the left_side (current results as dicts) with the right_side (all
# possible values as m2o pairs). Both lists are supposed to be using the
# same ordering, and can be merged in one pass.
result = []
known_values = {}
def append_left(left_side):
grouped_value = left_side[groupby] and left_side[groupby][0]
if grouped_value not in known_values:
result.append(left_side)
known_values[grouped_value] = left_side
else:
count_attr = groupby + '_count'
known_values[grouped_value].update({count_attr: left_side[count_attr]})
def append_right(right_side):
grouped_value = right_side[0]
if grouped_value not in known_values:
line = dict(result_template)
line[groupby] = right_side
line['__domain'] = [(groupby,'=',grouped_value)] + domain
result.append(line)
known_values[grouped_value] = line
while read_group_result or all_groups:
left_side = read_group_result[0] if read_group_result else None
right_side = all_groups[0] if all_groups else None
assert left_side is None or left_side[groupby] is False \
or isinstance(left_side[groupby], (tuple,list)), \
'M2O-like pair expected, got %r' % left_side[groupby]
assert right_side is None or isinstance(right_side, (tuple,list)), \
'M2O-like pair expected, got %r' % right_side
if left_side is None:
append_right(all_groups.pop(0))
elif right_side is None:
append_left(read_group_result.pop(0))
elif left_side[groupby] == right_side:
append_left(read_group_result.pop(0))
all_groups.pop(0) # discard right_side
elif not left_side[groupby] or not left_side[groupby][0]:
# left side == "Undefined" entry, not present on right_side
append_left(read_group_result.pop(0))
else:
append_right(all_groups.pop(0))
if folded:
for r in result:
r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
return result
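# Illustrative sketch (hypothetical data): merging read_group results with
# the full group list returned by _group_by_full, so that empty columns
# still show up (e.g. in kanban views).
#
#   read_group_result: [{'stage_id': (1, 'New'), 'stage_id_count': 3}]
#   all_groups:        [(1, 'New'), (2, 'Won')]
#   merged result:     [{'stage_id': (1, 'New'), 'stage_id_count': 3, ...},
#                       {'stage_id': (2, 'Won'), 'stage_id_count': 0, ...}]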
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
"""
Get the list of records in list view grouped by the given ``groupby`` fields
:param cr: database cursor
:param uid: current user id
:param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
:param list fields: list of fields present in the list view specified on the object
:param list groupby: fields by which the records will be grouped
:param int offset: optional number of records to skip
:param int limit: optional max number of records to return
:param dict context: context arguments, like lang, time zone
:param list orderby: optional ``order by`` specification, for
overriding the natural sort ordering of the
groups, see also :py:meth:`~osv.osv.osv.search`
(supported only for many2one fields currently)
:return: list of dictionaries (one dictionary for each record) containing:
* the values of fields grouped by the fields in ``groupby`` argument
* __domain: list of tuples specifying the search criteria
* __context: dictionary with argument like ``groupby``
:rtype: [{'field_name_1': value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
context = context or {}
self.check_access_rights(cr, uid, 'read')
if not fields:
fields = self._columns.keys()
query = self._where_calc(cr, uid, domain, context=context)
self._apply_ir_rules(cr, uid, query, 'read', context=context)
# Take care of adding join(s) if groupby is an '_inherits'ed field
groupby_list = groupby
qualified_groupby_field = groupby
if groupby:
if isinstance(groupby, list):
groupby = groupby[0]
qualified_groupby_field = self._inherits_join_calc(groupby, query)
if groupby:
assert not groupby or groupby in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
groupby_def = self._columns.get(groupby) or (self._inherit_fields.get(groupby) and self._inherit_fields.get(groupby)[2])
assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fget = self.fields_get(cr, uid, fields)
flist = ''
group_count = group_by = groupby
if groupby:
if fget.get(groupby):
groupby_type = fget[groupby]['type']
if groupby_type in ('date', 'datetime'):
qualified_groupby_field = "to_char(%s,'yyyy-mm')" % qualified_groupby_field
flist = "%s as %s " % (qualified_groupby_field, groupby)
elif groupby_type == 'boolean':
qualified_groupby_field = "coalesce(%s,false)" % qualified_groupby_field
flist = "%s as %s " % (qualified_groupby_field, groupby)
else:
flist = qualified_groupby_field
else:
# Don't allow arbitrary values, as this would be a SQL injection vector!
raise except_orm(_('Invalid group_by'),
_('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(groupby,))
aggregated_fields = [
f for f in fields
if f not in ('id', 'sequence')
if fget[f]['type'] in ('integer', 'float')
if (f in self._columns and getattr(self._columns[f], '_classic_write'))]
for f in aggregated_fields:
group_operator = fget[f].get('group_operator', 'sum')
if flist:
flist += ', '
qualified_field = '"%s"."%s"' % (self._table, f)
flist += "%s(%s) AS %s" % (group_operator, qualified_field, f)
gb = groupby and (' GROUP BY ' + qualified_groupby_field) or ''
from_clause, where_clause, where_clause_params = query.get_sql()
where_clause = where_clause and ' WHERE ' + where_clause
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
if len(groupby_list) < 2 and context.get('group_by_no_leaf'):
group_count = '_'
cr.execute('SELECT min(%s.id) AS id, count(%s.id) AS %s_count' % (self._table, self._table, group_count) + (flist and ',') + flist + ' FROM ' + from_clause + where_clause + gb + limit_str + offset_str, where_clause_params)
alldata = {}
groupby = group_by
for r in cr.dictfetchall():
for fld, val in r.items():
if val is None: r[fld] = False
alldata[r['id']] = r
del r['id']
order = orderby or groupby
data_ids = self.search(cr, uid, [('id', 'in', alldata.keys())], order=order, context=context)
# the IDs of records that have groupby field value = False or '' should be included too
data_ids += set(alldata.keys()).difference(data_ids)
if groupby:
data = self.read(cr, uid, data_ids, [groupby], context=context)
# restore order of the search as read() uses the default _order (this is only for groups, so the footprint of data should be small):
data_dict = dict((d['id'], d[groupby] ) for d in data)
result = [{'id': i, groupby: data_dict[i]} for i in data_ids]
else:
result = [{'id': i} for i in data_ids]
for d in result:
if groupby:
d['__domain'] = [(groupby, '=', alldata[d['id']][groupby] or False)] + domain
if not isinstance(groupby_list, (str, unicode)):
if groupby or not context.get('group_by_no_leaf', False):
d['__context'] = {'group_by': groupby_list[1:]}
if groupby and groupby in fget:
if d[groupby] and fget[groupby]['type'] in ('date', 'datetime'):
dt = datetime.datetime.strptime(alldata[d['id']][groupby][:7], '%Y-%m')
days = calendar.monthrange(dt.year, dt.month)[1]
date_value = datetime.datetime.strptime(d[groupby][:10], '%Y-%m-%d')
d[groupby] = babel.dates.format_date(
date_value, format='MMMM yyyy', locale=context.get('lang', 'en_US'))
d['__domain'] = [(groupby, '>=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-01', '%Y-%m-%d').strftime('%Y-%m-%d') or False),\
(groupby, '<=', alldata[d['id']][groupby] and datetime.datetime.strptime(alldata[d['id']][groupby][:7] + '-' + str(days), '%Y-%m-%d').strftime('%Y-%m-%d') or False)] + domain
del alldata[d['id']][groupby]
d.update(alldata[d['id']])
del d['id']
if groupby and groupby in self._group_by_full:
result = self._read_group_fill_results(cr, uid, domain, groupby, groupby_list,
aggregated_fields, result, read_group_order=order,
context=context)
return result
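# Hedged usage sketch (field names are hypothetical): grouping a list view
# by a many2one field and aggregating a float column.
#
#   groups = self.read_group(cr, uid, [('state', '!=', 'cancel')],
#                            ['partner_id', 'amount'], ['partner_id'])
#   # each dict carries 'partner_id', 'partner_id_count', the summed 'amount',
#   # plus '__domain' and possibly '__context' for drilling down into a group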
def _inherits_join_add(self, current_model, parent_model_name, query):
"""
Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
:param current_model: current model object
:param parent_model_name: name of the parent model for which the clauses should be added
:param query: query object on which the JOIN should be added
"""
inherits_field = current_model._inherits[parent_model_name]
parent_model = self.pool.get(parent_model_name)
parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
return parent_alias
def _inherits_join_calc(self, field, query):
"""
Adds missing table select and join clause(s) to ``query`` for reaching
the field coming from an '_inherits' parent table (no duplicates).
:param field: name of inherited field to reach
:param query: query object on which the JOIN should be added
:return: qualified name of field, to be used in SELECT clause
"""
current_table = self
parent_alias = '"%s"' % current_table._table
while field in current_table._inherit_fields and not field in current_table._columns:
parent_model_name = current_table._inherit_fields[field][0]
parent_table = self.pool.get(parent_model_name)
parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
current_table = parent_table
return '%s."%s"' % (parent_alias, field)
def _parent_store_compute(self, cr):
if not self._parent_store:
return
_logger.info('Computing parent left and right for table %s...', self._table)
def browse_rec(root, pos=0):
# TODO: set order
where = self._parent_name+'='+str(root)
if not root:
where = self._parent_name+' IS NULL'
if self._parent_order:
where += ' order by '+self._parent_order
cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
pos2 = pos + 1
for id in cr.fetchall():
pos2 = browse_rec(id[0], pos2)
cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
return pos2 + 1
query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
if self._parent_order:
query += ' order by ' + self._parent_order
pos = 0
cr.execute(query)
for (root,) in cr.fetchall():
pos = browse_rec(root, pos)
return True
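# Illustrative sketch (hypothetical tree): the nested-set values computed
# above let child_of searches run without recursion; roughly, a record is a
# descendant of R when R.parent_left < record.parent_left < R.parent_right.
#
#   root     (parent_left=0, parent_right=5)
#     child A  (1, 2)
#     child B  (3, 4)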
def _update_store(self, cr, f, k):
_logger.info("storing computed values of fields.function '%s'", k)
ss = self._columns[k]._symbol_set
update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
cr.execute('select id from '+self._table)
ids_lst = map(lambda x: x[0], cr.fetchall())
while ids_lst:
iids = ids_lst[:40]
ids_lst = ids_lst[40:]
res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
for key, val in res.items():
if f._multi:
val = val[k]
# if val is a many2one, just write the ID
if type(val) == tuple:
val = val[0]
if val is not False:
cr.execute(update_query, (ss[1](val), key))
def _check_selection_field_value(self, cr, uid, field, value, context=None):
"""Raise except_orm if value is not among the valid values for the selection field"""
if self._columns[field]._type == 'reference':
val_model, val_id_str = value.split(',', 1)
val_id = False
try:
val_id = long(val_id_str)
except ValueError:
pass
if not val_id:
raise except_orm(_('ValidateError'),
_('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
val = val_model
else:
val = value
if isinstance(self._columns[field].selection, (tuple, list)):
if val in dict(self._columns[field].selection):
return
elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
return
raise except_orm(_('ValidateError'),
_('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
def _check_removed_columns(self, cr, log=False):
# iterate on the database columns to drop the NOT NULL constraints
# of fields which were required but have been removed (or will be added by another module)
columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
columns += MAGIC_COLUMNS
cr.execute("SELECT a.attname, a.attnotnull"
" FROM pg_class c, pg_attribute a"
" WHERE c.relname=%s"
" AND c.oid=a.attrelid"
" AND a.attisdropped=%s"
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
" AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
for column in cr.dictfetchall():
if log:
_logger.debug("column %s is in the table %s but not in the corresponding object %s",
column['attname'], self._table, self._name)
if column['attnotnull']:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
def _save_constraint(self, cr, constraint_name, type):
"""
Record the creation of a constraint for this model, to make it possible
to delete it later when the module is uninstalled. Type can be either
'f' or 'u' depending on the constraint being a foreign key or not.
"""
if not self._module:
# no need to save constraints for custom models as they're not part
# of any module
return
assert type in ('f', 'u')
cr.execute("""
SELECT 1 FROM ir_model_constraint, ir_module_module
WHERE ir_model_constraint.module=ir_module_module.id
AND ir_model_constraint.name=%s
AND ir_module_module.name=%s
""", (constraint_name, self._module))
if not cr.rowcount:
cr.execute("""
INSERT INTO ir_model_constraint
(name, date_init, date_update, module, model, type)
VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
(SELECT id FROM ir_module_module WHERE name=%s),
(SELECT id FROM ir_model WHERE model=%s), %s)""",
(constraint_name, self._module, self._name, type))
def _save_relation_table(self, cr, relation_table):
"""
Record the creation of a many2many for this model, to make it possible
to delete it later when the module is uninstalled.
"""
cr.execute("""
SELECT 1 FROM ir_model_relation, ir_module_module
WHERE ir_model_relation.module=ir_module_module.id
AND ir_model_relation.name=%s
AND ir_module_module.name=%s
""", (relation_table, self._module))
if not cr.rowcount:
cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
(SELECT id FROM ir_module_module WHERE name=%s),
(SELECT id FROM ir_model WHERE model=%s))""",
(relation_table, self._module, self._name))
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
assert self.is_transient() or not dest_model.is_transient(), \
'Many2One relationships from non-transient Model to TransientModel are forbidden'
if self.is_transient() and not dest_model.is_transient():
# TransientModel relationships to regular Models are annoying
# usually because they could block deletion due to the FKs.
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or 'cascade'
fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
def _drop_constraint(self, cr, source_table, constraint_name):
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
# Find FK constraint(s) currently established for the m2o field,
# and see whether they are stale or not
cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
cl2.relname as foreign_table
FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
pg_attribute as att1, pg_attribute as att2
WHERE con.conrelid = cl1.oid
AND cl1.relname = %s
AND con.confrelid = cl2.oid
AND array_lower(con.conkey, 1) = 1
AND con.conkey[1] = att1.attnum
AND att1.attrelid = cl1.oid
AND att1.attname = %s
AND array_lower(con.confkey, 1) = 1
AND con.confkey[1] = att2.attnum
AND att2.attrelid = cl2.oid
AND att2.attname = %s
AND con.contype = 'f'""", (source_table, source_field, 'id'))
constraints = cr.dictfetchall()
if constraints:
if len(constraints) == 1:
# Is it the right constraint?
cons, = constraints
if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
or cons['foreign_table'] != dest_model._table:
# Wrong FK: drop it and recreate
_schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
else:
# it's all good, nothing to do!
return
else:
# Multiple FKs found for the same field, drop them all, and re-create
for cons in constraints:
_schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
# (re-)create the FK
self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
def _auto_init(self, cr, context=None):
"""
Call _field_create and, unless _auto is False:
- create the corresponding table in database for the model,
- possibly add the parent columns in database,
- possibly add the columns 'create_uid', 'create_date', 'write_uid',
'write_date' in database if _log_access is True (the default),
- report on database columns that no longer exist in _columns,
- drop NOT NULL constraints on columns that no longer exist in _columns,
- alter existing database columns to match _columns,
- create database tables to match _columns,
- add database indices to match _columns,
- save in self._foreign_keys a list of foreign keys to create (see
_auto_end).
"""
self._foreign_keys = set()
raise_on_invalid_object_name(self._name)
if context is None:
context = {}
store_compute = False
todo_end = []
update_custom_fields = context.get('update_custom_fields', False)
self._field_create(cr, context=context)
create = not self._table_exist(cr)
if getattr(self, '_auto', True):
if create:
self._create_table(cr)
cr.commit()
if self._parent_store:
if not self._parent_columns_exist(cr):
self._create_parent_columns(cr)
store_compute = True
# Create the create_uid, create_date, write_uid, write_date columns if desired.
if self._log_access:
self._add_log_columns(cr)
self._check_removed_columns(cr, log=False)
# iterate on the "object columns"
column_data = self._select_column_data(cr)
for k, f in self._columns.iteritems():
if k in MAGIC_COLUMNS:
continue
# Don't update custom (also called manual) fields
if f.manual and not update_custom_fields:
continue
if isinstance(f, fields.one2many):
self._o2m_raise_on_missing_reference(cr, f)
elif isinstance(f, fields.many2many):
self._m2m_raise_or_create_relation(cr, f)
else:
res = column_data.get(k)
# The field is not found as-is in database, try if it
# exists with an old name.
if not res and hasattr(f, 'oldname'):
res = column_data.get(f.oldname)
if res:
cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
res['attname'] = k
column_data[k] = res
_schema.debug("Table '%s': renamed column '%s' to '%s'",
self._table, f.oldname, k)
# The field already exists in database. Possibly
# change its type, rename it, drop it or change its
# constraints.
if res:
f_pg_type = res['typname']
f_pg_size = res['size']
f_pg_notnull = res['attnotnull']
if isinstance(f, fields.function) and not f.store and\
not getattr(f, 'nodrop', False):
_logger.info('column %s (%s) in table %s removed: converted to a function !\n',
k, f.string, self._table)
cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
cr.commit()
_schema.debug("Table '%s': dropped column '%s' with cascade",
self._table, k)
f_obj_type = None
else:
f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
if f_obj_type:
ok = False
casts = [
('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
('varchar', 'text', 'TEXT', ''),
('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
('timestamp', 'date', 'date', '::date'),
('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
]
if f_pg_type == 'varchar' and f._type == 'char' and ((f.size is None and f_pg_size) or f_pg_size < f.size):
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
_schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
for c in casts:
if (f_pg_type==c[0]) and (f._type==c[1]):
if f_pg_type != f_obj_type:
ok = True
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
cr.execute(('UPDATE "%s" SET "%s"=temp_change_size'+c[3]) % (self._table, k))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
_schema.debug("Table '%s': column '%s' changed type from %s to %s",
self._table, k, c[0], c[1])
break
if f_pg_type != f_obj_type:
if not ok:
i = 0
while True:
newname = k + '_moved' + str(i)
cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
"WHERE c.relname=%s " \
"AND a.attname=%s " \
"AND c.oid=a.attrelid ", (self._table, newname))
if not cr.fetchone()[0]:
break
i += 1
if f_pg_notnull:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
_schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
self._table, k, f_pg_type, f._type, newname)
# if the field is required and hasn't got a NOT NULL constraint
if f.required and f_pg_notnull == 0:
# set the field to the default value if any
if k in self._defaults:
if callable(self._defaults[k]):
default = self._defaults[k](self, cr, SUPERUSER_ID, context)
else:
default = self._defaults[k]
if default is not None:
ss = self._columns[k]._symbol_set
query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
cr.execute(query, (ss[1](default),))
# add the NOT NULL constraint
cr.commit()
try:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
cr.commit()
_schema.debug("Table '%s': column '%s': added NOT NULL constraint",
self._table, k)
except Exception:
msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
_schema.warning(msg, self._table, k, self._table, k)
cr.commit()
elif not f.required and f_pg_notnull == 1:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.commit()
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, k)
# Verify index
indexname = '%s_%s_index' % (self._table, k)
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
res2 = cr.dictfetchall()
if not res2 and f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
cr.commit()
if f._type == 'text':
# FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
"This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
" because there is a length limit for indexable btree values!\n"\
"Use a search view instead if you simply want to make the field searchable."
_schema.warning(msg, self._table, f._type, k)
if res2 and not f.select:
cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
cr.commit()
msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
_schema.debug(msg, self._table, k, f._type)
if isinstance(f, fields.many2one):
dest_model = self.pool.get(f._obj)
if dest_model._table != 'ir_actions':
self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
# The field doesn't exist in database. Create it if necessary.
else:
if not isinstance(f, fields.function) or f.store:
# add the missing field
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, k, get_pg_type(f)[1])
# initialize it
if not create and k in self._defaults:
if callable(self._defaults[k]):
default = self._defaults[k](self, cr, SUPERUSER_ID, context)
else:
default = self._defaults[k]
ss = self._columns[k]._symbol_set
query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
cr.execute(query, (ss[1](default),))
cr.commit()
_logger.debug("Table '%s': setting default value of new column %s", self._table, k)
# remember the functions to call for the stored fields
if isinstance(f, fields.function):
order = 10
if f.store is not True: # i.e. if f.store is a dict
order = f.store[f.store.keys()[0]][2]
todo_end.append((order, self._update_store, (f, k)))
# and add constraints if needed
if isinstance(f, fields.many2one):
if not self.pool.get(f._obj):
raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
dest_model = self.pool.get(f._obj)
ref = dest_model._table
# ir_actions is inherited so foreign key doesn't work on it
if ref != 'ir_actions':
self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
if f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
if f.required:
try:
cr.commit()
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
_schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
self._table, k)
except Exception:
msg = "WARNING: unable to set column %s of table %s not null !\n"\
"Try to re-run: openerp-server --update=module\n"\
"If it doesn't work, update records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
_logger.warning(msg, k, self._table, self._table, k)
cr.commit()
else:
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
create = not bool(cr.fetchone())
cr.commit() # start a new transaction
self._add_sql_constraints(cr)
if create:
self._execute_sql(cr)
if store_compute:
self._parent_store_compute(cr)
cr.commit()
return todo_end
def _auto_end(self, cr, context=None):
""" Create the foreign keys recorded by _auto_init. """
for t, k, r, d in self._foreign_keys:
cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
cr.commit()
del self._foreign_keys
def _table_exist(self, cr):
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
return cr.rowcount
def _create_table(self, cr):
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
_schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
cr.execute("""SELECT c.relname
FROM pg_class c, pg_attribute a
WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
""", (self._table, 'parent_left'))
return cr.rowcount
def _create_parent_columns(self, cr):
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
if 'parent_left' not in self._columns:
_logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_left', 'INTEGER')
elif not self._columns['parent_left'].select:
_logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition',
self._table)
if 'parent_right' not in self._columns:
_logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_right', 'INTEGER')
elif not self._columns['parent_right'].select:
_logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition',
self._table)
if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
_logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
self._parent_name, self._name)
cr.commit()
def _add_log_columns(self, cr):
for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
cr.execute("""
SELECT c.relname
FROM pg_class c, pg_attribute a
WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
""", (self._table, field))
if not cr.rowcount:
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
cr.commit()
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, field, field_def)
def _select_column_data(self, cr):
# attlen is the number of bytes necessary to represent the type when
# the type has a fixed size. If the type has a varying size attlen is
# -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
"FROM pg_class c,pg_attribute a,pg_type t " \
"WHERE c.relname=%s " \
"AND c.oid=a.attrelid " \
"AND a.atttypid=t.oid", (self._table,))
return dict((x['attname'], x) for x in cr.dictfetchall())
def _o2m_raise_on_missing_reference(self, cr, f):
# TODO this check should be a method on fields.one2many.
other = self.pool.get(f._obj)
if other:
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
m2m_tbl, col1, col2 = f._sql_names(self)
self._save_relation_table(cr, m2m_tbl)
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
if not cr.dictfetchall():
if not self.pool.get(f._obj):
raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
dest_model = self.pool.get(f._obj)
ref = dest_model._table
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
# create foreign key references with ondelete=cascade, unless the targets are SQL views
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
cr.commit()
_schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
"""
Modify this model's database table constraints so they match the ones in
_sql_constraints.
"""
def unify_cons_text(txt):
return txt.lower().replace(', ',',').replace(' (','(')
for (key, con, _) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
self._save_constraint(cr, conname, 'u')
cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
existing_constraints = cr.dictfetchall()
sql_actions = {
'drop': {
'execute': False,
'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
self._table, conname, con),
'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
'order': 1,
},
'add': {
'execute': False,
'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
self._table, con),
'order': 2,
},
}
if not existing_constraints:
# constraint does not exist:
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
# constraint exists but its definition has changed:
sql_actions['drop']['execute'] = True
sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
# we need to add the constraint:
sql_actions = [item for item in sql_actions.values()]
sql_actions.sort(key=lambda x: x['order'])
for sql_action in [action for action in sql_actions if action['execute']]:
try:
cr.execute(sql_action['query'])
cr.commit()
_schema.debug(sql_action['msg_ok'])
except Exception:
_schema.warning(sql_action['msg_err'])
cr.rollback()
def _execute_sql(self, cr):
""" Execute the SQL code from the _sql attribute (if any)."""
if hasattr(self, "_sql"):
for line in self._sql.split(';'):
line2 = line.replace('\n', '').strip()
if line2:
cr.execute(line2)
cr.commit()
#
# Update objects that use this one to update their _inherits fields
#
def _inherits_reload_src(self):
""" Recompute the _inherit_fields mapping on each _inherits'd child model."""
for obj in self.pool.models.values():
if self._name in obj._inherits:
obj._inherits_reload()
def _inherits_reload(self):
""" Recompute the _inherit_fields mapping.
This will also call itself on each inherits'd child model.
"""
res = {}
for table in self._inherits:
other = self.pool.get(table)
for col in other._columns.keys():
res[col] = (table, self._inherits[table], other._columns[col], table)
for col in other._inherit_fields.keys():
res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
self._inherit_fields = res
self._all_columns = self._get_column_infos()
self._inherits_reload_src()
def _get_column_infos(self):
"""Returns a dict mapping all fields names (direct fields and
inherited field via _inherits) to a ``column_info`` struct
giving detailed columns """
result = {}
for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
result[k] = fields.column_info(k, col, parent, m2o, original_parent)
for k, col in self._columns.iteritems():
result[k] = fields.column_info(k, col)
return result
def _inherits_check(self):
for table, field_name in self._inherits.items():
if field_name not in self._columns:
_logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
required=True, ondelete="cascade")
elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
_logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
self._columns[field_name].required = True
self._columns[field_name].ondelete = "cascade"
#def __getattr__(self, name):
# """
# Proxies attribute accesses to the `inherits` parent so we can call methods defined on the inherited parent
# (though inherits doesn't use Python inheritance).
# Handles translating between local ids and remote ids.
# Known issue: doesn't work correctly when using python's own super(), don't involve inherit-based inheritance
# when you have inherits.
# """
# for model, field in self._inherits.iteritems():
# proxy = self.pool.get(model)
# if hasattr(proxy, name):
# attribute = getattr(proxy, name)
# if not hasattr(attribute, '__call__'):
# return attribute
# break
# else:
# return super(orm, self).__getattr__(name)
# def _proxy(cr, uid, ids, *args, **kwargs):
# objects = self.browse(cr, uid, ids, kwargs.get('context', None))
# lst = [obj[field].id for obj in objects if obj[field]]
# return getattr(proxy, name)(cr, uid, lst, *args, **kwargs)
# return _proxy
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
""" Return the definition of each field.
        The returned value is a dictionary (indexed by field name) of
dictionaries. The _inherits'd fields are included. The string, help,
and selection (if present) attributes are translated.
:param cr: database cursor
:param user: current user id
:param allfields: list of fields
:param context: context arguments, like lang, time zone
:return: dictionary of field dictionaries, each one describing a field of the business object
:raise AccessError: * if user has no create/write rights on the requested object
"""
if context is None:
context = {}
write_access = self.check_access_rights(cr, user, 'write', raise_exception=False) \
or self.check_access_rights(cr, user, 'create', raise_exception=False)
res = {}
translation_obj = self.pool.get('ir.translation')
for parent in self._inherits:
res.update(self.pool.get(parent).fields_get(cr, user, allfields, context))
for f, field in self._columns.iteritems():
if (allfields and f not in allfields) or \
(field.groups and not self.user_has_groups(cr, user, groups=field.groups, context=context)):
continue
res[f] = fields.field_to_dict(self, cr, user, field, context=context)
if not write_access:
res[f]['readonly'] = True
res[f]['states'] = {}
if 'lang' in context:
if 'string' in res[f]:
res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context['lang'])
if res_trans:
res[f]['string'] = res_trans
if 'help' in res[f]:
help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context['lang'])
if help_trans:
res[f]['help'] = help_trans
if 'selection' in res[f]:
if isinstance(field.selection, (tuple, list)):
sel = field.selection
sel2 = []
for key, val in sel:
val2 = None
if val:
val2 = translation_obj._get_source(cr, user, self._name + ',' + f, 'selection', context['lang'], val)
sel2.append((key, val2 or val))
res[f]['selection'] = sel2
return res
def check_field_access_rights(self, cr, user, operation, fields, context=None):
"""
Check the user access rights on the given fields. This raises Access
Denied if the user does not have the rights. Otherwise it returns the
        fields (as-is if ``fields`` is not falsy, or the readable/writable
        fields if ``fields`` is falsy).
"""
def p(field_name):
"""Predicate to test if the user has access to the given field name."""
# Ignore requested field if it doesn't exist. This is ugly but
# it seems to happen at least with 'name_alias' on res.partner.
if field_name not in self._all_columns:
return True
field = self._all_columns[field_name].column
if user != SUPERUSER_ID and field.groups:
return self.user_has_groups(cr, user, groups=field.groups, context=context)
else:
return True
if not fields:
fields = filter(p, self._all_columns.keys())
else:
filtered_fields = filter(lambda a: not p(a), fields)
if filtered_fields:
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s', operation, user, self._name, ', '.join(filtered_fields))
raise except_orm(
_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. '
'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
return fields
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
""" Read records with given ids with the given fields
:param cr: database cursor
:param user: current user id
:param ids: id or list of the ids of the records to read
        :param fields: optional list of field names to return (default: all fields are returned)
:type fields: list (example ['field_name_1', ...])
        :param context: optional context dictionary - it may contain keys for specifying certain options
like ``context_lang``, ``context_tz`` to alter the results of the call.
A special ``bin_size`` boolean flag may also be passed in the context to request the
value of all fields.binary columns to be returned as the size of the binary instead of its
                        contents. This can also be selectively overridden by passing a field-specific flag
in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
:return: list of dictionaries((dictionary per record asked)) with requested field values
        :rtype: [{'name_of_the_field': value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
if not context:
context = {}
self.check_access_rights(cr, user, 'read')
fields = self.check_field_access_rights(cr, user, 'read', fields)
if isinstance(ids, (int, long)):
select = [ids]
else:
select = ids
select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
result = self._read_flat(cr, user, select, fields, context, load)
for r in result:
for key, v in r.items():
if v is None:
r[key] = False
if isinstance(ids, (int, long, dict)):
return result and result[0] or False
return result
def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
if not context:
context = {}
if not ids:
return []
if fields_to_read is None:
fields_to_read = self._columns.keys()
# Construct a clause for the security rules.
# 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
# or will at least contain self._table.
rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
# all inherited fields + all non inherited fields for which the attribute whose name is in load is True
fields_pre = [f for f in fields_to_read if
f == self.CONCURRENCY_CHECK_FIELD
or (f in self._columns and getattr(self._columns[f], '_classic_write'))
] + self._inherits.values()
res = []
if len(fields_pre):
def convert_field(f):
f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
if f in ('create_date', 'write_date'):
return "date_trunc('second', %s) as %s" % (f_qual, f)
if f == self.CONCURRENCY_CHECK_FIELD:
if self._log_access:
return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
return 'length(%s) as "%s"' % (f_qual, f)
return f_qual
fields_pre2 = map(convert_field, fields_pre)
order_by = self._parent_order or self._order
select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
if rule_clause:
query += " AND " + (' OR '.join(rule_clause))
query += " ORDER BY " + order_by
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute(query, [tuple(sub_ids)] + rule_params)
results = cr.dictfetchall()
result_ids = [x['id'] for x in results]
self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
res.extend(results)
else:
res = map(lambda x: {'id': x}, ids)
if context.get('lang'):
for f in fields_pre:
if f == self.CONCURRENCY_CHECK_FIELD:
continue
if self._columns[f].translate:
ids = [x['id'] for x in res]
#TODO: optimize out of this loop
res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context['lang'], ids)
for r in res:
r[f] = res_trans.get(r['id'], False) or r[f]
for table in self._inherits:
col = self._inherits[table]
cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
if not cols:
continue
res2 = self.pool.get(table).read(cr, user, [x[col] for x in res], cols, context, load)
res3 = {}
for r in res2:
res3[r['id']] = r
del r['id']
for record in res:
                if not record[col]: # skip if the record's _inherits parent has been deleted
continue
record.update(res3[record[col]])
if col not in fields_to_read:
del record[col]
# all fields which need to be post-processed by a simple function (symbol_get)
fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
if fields_post:
for r in res:
for f in fields_post:
r[f] = self._columns[f]._symbol_get(r[f])
ids = [x['id'] for x in res]
# all non inherited fields for which the attribute whose name is in load is False
fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)
# Compute POST fields
todo = {}
for f in fields_post:
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key, val in todo.items():
if key:
res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
assert res2 is not None, \
'The function field "%s" on the "%s" model returned None\n' \
'(a dictionary was expected).' % (val[0], self._name)
for pos in val:
for record in res:
                        if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK: why we get a string instead of a dict in python2.6
multi_fields = res2.get(record['id'],{})
if multi_fields:
record[pos] = multi_fields.get(pos,[])
else:
for f in val:
res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
for record in res:
if res2:
record[f] = res2[record['id']]
else:
record[f] = []
# Warn about deprecated fields now that fields_pre and fields_post are computed
# Explicitly use list() because we may receive tuples
for f in list(fields_pre) + list(fields_post):
field_column = self._all_columns.get(f) and self._all_columns.get(f).column
if field_column and field_column.deprecated:
_logger.warning('Field %s.%s is deprecated: %s', self._name, f, field_column.deprecated)
readonly = None
for vals in res:
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
if not fobj:
continue
groups = fobj.read
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
if type(vals[field]) == type([]):
vals[field] = []
elif type(vals[field]) == type(0.0):
vals[field] = 0
elif type(vals[field]) == type(''):
vals[field] = '=No Permission='
else:
vals[field] = False
return res
# TODO check READ access
def perm_read(self, cr, user, ids, context=None, details=True):
"""
Returns some metadata about the given records.
:param details: if True, \*_uid fields are replaced with the name of the user
:return: list of ownership dictionaries for each requested record
:rtype: list of dictionaries with the following keys:
* id: object id
* create_uid: user who created the record
* create_date: date when the record was created
* write_uid: last user who changed the record
* write_date: date of the last change to the record
* xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
"""
if not context:
context = {}
if not ids:
return []
fields = ''
uniq = isinstance(ids, (int, long))
if uniq:
ids = [ids]
fields = ['id']
if self._log_access:
fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
quoted_table = '"%s"' % self._table
fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
query = '''SELECT %s, __imd.module, __imd.name
FROM %s LEFT JOIN ir_model_data __imd
ON (__imd.model = %%s and __imd.res_id = %s.id)
WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
cr.execute(query, (self._name, tuple(ids)))
res = cr.dictfetchall()
for r in res:
for key in r:
r[key] = r[key] or False
if details and key in ('write_uid', 'create_uid') and r[key]:
try:
r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
except Exception:
pass # Leave the numeric uid there
r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
del r['name'], r['module']
if uniq:
return res[ids[0]]
return res
def _check_concurrency(self, cr, ids, context):
if not context:
return
if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
return
check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
for sub_ids in cr.split_for_in_conditions(ids):
ids_to_check = []
for id in sub_ids:
id_ref = "%s,%s" % (self._name, id)
update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
if update_date:
ids_to_check.extend([id, update_date])
if not ids_to_check:
continue
cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
res = cr.fetchone()
if res:
# mention the first one only to keep the error message readable
raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
"""Verify the returned rows after applying record rules matches
the length of `ids`, and raise an appropriate exception if it does not.
"""
ids, result_ids = set(ids), set(result_ids)
missing_ids = ids - result_ids
if missing_ids:
# Attempt to distinguish record rule restriction vs deleted records,
            # to provide a more specific error message - check whether the missing
            # records still exist in the database.
cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
if cr.rowcount:
# the missing ids are (at least partially) hidden by access rules
if uid == SUPERUSER_ID:
return
_logger.warning('Access Denied by record rules for operation: %s, uid: %s, model: %s', operation, uid, self._name)
raise except_orm(_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
else:
# If we get here, the missing_ids are not in the database
if operation in ('read','unlink'):
# No need to warn about deleting an already deleted record.
# And no error when reading a record that was deleted, to prevent spurious
# errors for non-transactional search/read sequences coming from clients
return
_logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
raise except_orm(_('Missing document(s)'),
_('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
"""Verifies that the operation given by ``operation`` is allowed for the user
according to the access rights."""
return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
"""Verifies that the operation given by ``operation`` is allowed for the user
according to ir.rules.
:param operation: one of ``write``, ``unlink``
:raise except_orm: * if current ir.rules do not permit this operation.
:return: None if the operation is allowed
"""
if uid == SUPERUSER_ID:
return
if self.is_transient():
# Only one single implicit access rule for transient models: owner only!
# This is ok to hardcode because we assert that TransientModels always
# have log_access enabled so that the create_uid column is always there.
# And even with _inherits, these fields are always present in the local
# table too, so no need for JOINs.
cr.execute("""SELECT distinct create_uid
FROM %s
WHERE id IN %%s""" % self._table, (tuple(ids),))
uids = [x[0] for x in cr.fetchall()]
if len(uids) != 1 or uids[0] != uid:
raise except_orm(_('Access Denied'),
_('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
else:
where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
if where_clause:
where_clause = ' and ' + ' and '.join(where_clause)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
' WHERE ' + self._table + '.id IN %s' + where_clause,
[sub_ids] + where_params)
returned_ids = [x['id'] for x in cr.dictfetchall()]
self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
"""Call given workflow trigger as a result of a CRUD operation"""
wf_service = netsvc.LocalService("workflow")
for res_id in ids:
getattr(wf_service, trigger)(uid, self._name, res_id, cr)
def _workflow_signal(self, cr, uid, ids, signal, context=None):
"""Send given workflow signal and return a dict mapping ids to workflow results"""
wf_service = netsvc.LocalService("workflow")
result = {}
for res_id in ids:
result[res_id] = wf_service.trg_validate(uid, self._name, res_id, signal, cr)
return result
def unlink(self, cr, uid, ids, context=None):
"""
Delete records with given ids
:param cr: database cursor
:param uid: current user id
:param ids: id or list of ids
:param context: (optional) context arguments, like lang, time zone
:return: True
:raise AccessError: * if user has no unlink rights on the requested object
* if user tries to bypass access rules for unlink on the requested object
:raise UserError: if the record is default property for other records
"""
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
self._check_concurrency(cr, ids, context)
self.check_access_rights(cr, uid, 'unlink')
ir_property = self.pool.get('ir.property')
# Check if the records are used as default properties.
domain = [('res_id', '=', False),
('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
]
if ir_property.search(cr, uid, domain, context=context):
raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
# Delete the records' properties.
property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
ir_property.unlink(cr, uid, property_ids, context=context)
self._workflow_trigger(cr, uid, ids, 'trg_delete', context=context)
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
pool_model_data = self.pool.get('ir.model.data')
ir_values_obj = self.pool.get('ir.values')
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('delete from ' + self._table + ' ' \
'where id IN %s', (sub_ids,))
# Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
# as these are not connected with real database foreign keys, and would be dangling references.
# Note: following steps performed as admin to avoid access rights restrictions, and with no context
# to avoid possible side-effects during admin calls.
            # Step 1. Find the ir_model_data references attached to the affected IDs
reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
            # Step 2. Actually delete the referenced ir_model_data records
if reference_ids:
pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
            # For the same reason, remove the records relevant to ir_values
ir_value_ids = ir_values_obj.search(cr, uid,
['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
context=context)
if ir_value_ids:
ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
for order, object, store_ids, fields in result_store:
if object != self._name:
obj = self.pool.get(object)
cr.execute('select id from '+obj._table+' where id IN %s', (tuple(store_ids),))
rids = map(lambda x: x[0], cr.fetchall())
if rids:
obj._store_set_values(cr, uid, rids, fields, context)
return True
#
# TODO: Validate
#
def write(self, cr, user, ids, vals, context=None):
"""
Update records with given ids with the given field values
:param cr: database cursor
:param user: current user id
:type user: integer
:param ids: object id or list of object ids to update according to **vals**
:param vals: field values to update, e.g {'field_name': new_field_value, ...}
:type vals: dictionary
:param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
:type context: dictionary
:return: True
:raise AccessError: * if user has no write rights on the requested object
* if user tries to bypass access rules for write on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
**Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
+ For a many2many field, a list of tuples is expected.
              Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
(3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
(4, ID) link to existing record with id = ID (adds a relationship)
(5) unlink all (like using (3,ID) for all linked records)
(6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
Example:
[(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
            + For a one2many field, a list of tuples is expected.
              Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
Example:
[(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
+ For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
+ For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
"""
readonly = None
self.check_field_access_rights(cr, user, 'write', vals.keys())
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
elif field in self._inherit_fields:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
if not edit:
vals.pop(field)
if not context:
context = {}
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
self._check_concurrency(cr, ids, context)
self.check_access_rights(cr, user, 'write')
result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
# No direct update of parent_left/right
vals.pop('parent_left', None)
vals.pop('parent_right', None)
parents_changed = []
parent_order = self._parent_order or self._order
if self._parent_store and (self._parent_name in vals):
# The parent_left/right computation may take up to
# 5 seconds. No need to recompute the values if the
# parent is the same.
# Note: to respect parent_order, nodes must be processed in
# order, so ``parents_changed`` must be ordered properly.
parent_val = vals[self._parent_name]
if parent_val:
query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
(self._table, self._parent_name, self._parent_name, parent_order)
cr.execute(query, (tuple(ids), parent_val))
else:
query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
(self._table, self._parent_name, parent_order)
cr.execute(query, (tuple(ids),))
parents_changed = map(operator.itemgetter(0), cr.fetchall())
upd0 = []
upd1 = []
upd_todo = []
updend = []
direct = []
totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
for field in vals:
field_column = self._all_columns.get(field) and self._all_columns.get(field).column
if field_column and field_column.deprecated:
_logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
if field in self._columns:
if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
if (not totranslate) or not self._columns[field].translate:
upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
upd1.append(self._columns[field]._symbol_set[1](vals[field]))
direct.append(field)
else:
upd_todo.append(field)
else:
updend.append(field)
if field in self._columns \
and hasattr(self._columns[field], 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
upd0.append('write_uid=%s')
upd0.append("write_date=(now() at time zone 'UTC')")
upd1.append(user)
if len(upd0):
self.check_access_rule(cr, user, ids, 'write', context=context)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
'where id IN %s', upd1 + [sub_ids])
if cr.rowcount != len(sub_ids):
raise except_orm(_('AccessError'),
_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
if totranslate:
# TODO: optimize
for f in direct:
if self._columns[f].translate:
src_trans = self.pool.get(self._name).read(cr, user, ids, [f])[0][f]
if not src_trans:
src_trans = vals[f]
# Inserting value to DB
self.write(cr, user, ids, {f: vals[f]})
self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
# call the 'set' method of fields which are not classic_write
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
        # default elements in context must be removed when calling a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
for field in upd_todo:
for id in ids:
result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
unknown_fields = updend[:]
for table in self._inherits:
col = self._inherits[table]
nids = []
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
'where id IN %s', (sub_ids,))
nids.extend([x[0] for x in cr.fetchall()])
v = {}
for val in updend:
if self._inherit_fields[val][0] == table:
v[val] = vals[val]
unknown_fields.remove(val)
if v:
self.pool.get(table).write(cr, user, nids, v, context)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
self._validate(cr, user, ids, context)
# TODO: use _order to set dest at the right position and not first node of parent
# We can't defer parent_store computation because the stored function
        # fields that are computed may refer (directly or indirectly) to
# parent_left/right (via a child_of domain)
if parents_changed:
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
order = self._parent_order or self._order
parent_val = vals[self._parent_name]
if parent_val:
clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
else:
clause, params = '%s IS NULL' % (self._parent_name,), ()
for id in parents_changed:
cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
pleft, pright = cr.fetchone()
distance = pright - pleft + 1
# Positions of current siblings, to locate proper insertion point;
# this can _not_ be fetched outside the loop, as it needs to be refreshed
# after each update, in case several nodes are sequentially inserted one
                    # next to the other (i.e. computed incrementally)
cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
parents = cr.fetchall()
# Find Position of the element
position = None
for (parent_pright, parent_id) in parents:
if parent_id == id:
break
position = parent_pright + 1
# It's the first node of the parent
if not position:
if not parent_val:
position = 1
else:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
position = cr.fetchone()[0] + 1
if pleft < position <= pright:
raise except_orm(_('UserError'), _('Recursivity Detected.'))
if pleft < position:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
else:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
result += self._store_get_values(cr, user, ids, vals.keys(), context)
result.sort()
done = {}
for order, object, ids_to_update, fields_to_recompute in result:
key = (object, tuple(fields_to_recompute))
done.setdefault(key, {})
            # avoid doing the same computation several times
todo = []
for id in ids_to_update:
if id not in done[key]:
done[key][id] = True
todo.append(id)
self.pool.get(object)._store_set_values(cr, user, todo, fields_to_recompute, context)
self._workflow_trigger(cr, user, ids, 'trg_write', context=context)
return True
#
# TODO: Should set perm to user.xxx
#
def create(self, cr, user, vals, context=None):
"""
Create a new record for the model.
The values for the new record are initialized using the ``vals``
argument, and if necessary the result of ``default_get()``.
:param cr: database cursor
:param user: current user id
:type user: integer
:param vals: field values for new record, e.g {'field_name': field_value, ...}
:type vals: dictionary
:param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
:type context: dictionary
:return: id of new record created
:raise AccessError: * if user has no create rights on the requested object
* if user tries to bypass access rules for create on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
        :raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
**Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
to specify them.
"""
if not context:
context = {}
if self.is_transient():
self._transient_vacuum(cr, user)
self.check_access_rights(cr, user, 'create')
if self._log_access:
for f in LOG_ACCESS_COLUMNS:
if vals.pop(f, None) is not None:
_logger.warning(
'Field `%s` is not allowed when creating the model `%s`.',
f, self._name)
vals = self._add_missing_default_values(cr, user, vals, context)
tocreate = {}
for v in self._inherits:
if self._inherits[v] not in vals:
tocreate[v] = {}
else:
tocreate[v] = {'id': vals[self._inherits[v]]}
(upd0, upd1, upd2) = ('', '', [])
upd_todo = []
unknown_fields = []
for v in vals.keys():
if v in self._inherit_fields and v not in self._columns:
(table, col, col_detail, original_parent) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
else:
if (v not in self._inherit_fields) and (v not in self._columns):
del vals[v]
unknown_fields.append(v)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
        # Try-except added to filter out the creation of records whose fields are all readonly.
        # Example: any dashboard which has all its fields readonly (backed by database views).
try:
cr.execute("SELECT nextval('"+self._sequence+"')")
except:
raise except_orm(_('UserError'),
_('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.'))
id_new = cr.fetchone()[0]
for table in tocreate:
if self._inherits[table] in vals:
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
# When linking/creating parent records, force context without 'no_store_function' key that
# defers stored functions computing, as these won't be computed in batch at the end of create().
parent_context = dict(context)
parent_context.pop('no_store_function', None)
if record_id is None or not record_id:
record_id = self.pool.get(table).create(cr, user, tocreate[table], context=parent_context)
else:
self.pool.get(table).write(cr, user, [record_id], tocreate[table], context=parent_context)
upd0 += ',' + self._inherits[table]
upd1 += ',%s'
upd2.append(record_id)
        # Start: set boolean fields to False if they are not touched (to make search more reliable)
bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
for bool_field in bool_fields:
if bool_field not in vals:
vals[bool_field] = False
        # End
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
else:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
vals.pop(field)
for field in vals:
if self._columns[field]._classic_write:
upd0 = upd0 + ',"' + field + '"'
upd1 = upd1 + ',' + self._columns[field]._symbol_set[0]
upd2.append(self._columns[field]._symbol_set[1](vals[field]))
            # for the function fields that receive a value, we set them directly in the database
            # (they may be required), but we also need to trigger the _fnct_inv()
if (hasattr(self._columns[field], '_fnct_inv')) and not isinstance(self._columns[field], fields.related):
                # TODO: special-casing the related fields this way is really creepy, but it shouldn't be changed
                # one week before the release candidate. The only good way to handle this correctly seems to be to
                # add an attribute making a field "really readonly", and thus totally ignored by create()... otherwise
                # if, for example, the related field has a default value (for usability) then the _fnct_inv is called
                # and it may raise some access rights error. Changing this is too big a change for now, and is thus
                # postponed until after the release but, definitively, the behavior shouldn't be different for related
                # and function fields.
upd_todo.append(field)
else:
                # TODO: this `if` statement should be removed because there is no good reason to special-case
                # related fields. See the above TODO comment for further explanations.
if not isinstance(self._columns[field], fields.related):
upd_todo.append(field)
if field in self._columns \
and hasattr(self._columns[field], 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
upd0 += ',create_uid,create_date,write_uid,write_date'
upd1 += ",%s,(now() at time zone 'UTC'),%s,(now() at time zone 'UTC')"
upd2.extend((user, user))
cr.execute('insert into "'+self._table+'" (id'+upd0+") values ("+str(id_new)+upd1+')', tuple(upd2))
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
if self._parent_store and not context.get('defer_parent_store_computation'):
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
parent = vals.get(self._parent_name, False)
if parent:
cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
pleft_old = None
result_p = cr.fetchall()
for (pleft,) in result_p:
if not pleft:
break
pleft_old = pleft
if not pleft_old:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
pleft_old = cr.fetchone()[0]
pleft = pleft_old
else:
cr.execute('select max(parent_right) from '+self._table)
pleft = cr.fetchone()[0] or 0
cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
        # default elements in context must be removed when calling a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
result = []
for field in upd_todo:
result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
self._validate(cr, user, [id_new], context)
if not context.get('no_store_function', False):
result += self._store_get_values(cr, user, [id_new], vals.keys(), context)
result.sort()
done = []
for order, object, ids, fields2 in result:
if not (object, ids, fields2) in done:
self.pool.get(object)._store_set_values(cr, user, ids, fields2, context)
done.append((object, ids, fields2))
if self._log_create and not (context and context.get('no_store_function', False)):
message = self._description + \
" '" + \
self.name_get(cr, user, [id_new], context=context)[0][1] + \
"' " + _("created.")
self.log(cr, user, id_new, message, True, context=context)
self.check_access_rule(cr, user, [id_new], 'create', context=context)
self._workflow_trigger(cr, user, [id_new], 'trg_create', context=context)
return id_new
def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
"""Fetch records as objects allowing to use dot notation to browse fields and relations
:param cr: database cursor
:param uid: current user id
:param select: id or list of ids.
:param context: context arguments, like lang, time zone
:rtype: object or list of objects requested
"""
self._list_class = list_class or browse_record_list
cache = {}
        # need to accept ints and longs because ids coming from a method
        # launched by a button in the interface have type long...
if isinstance(select, (int, long)):
return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process)
elif isinstance(select, list):
return self._list_class([browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select], context=context)
else:
return browse_null()
def _store_get_values(self, cr, uid, ids, fields, context):
"""Returns an ordered list of fields.functions to call due to
an update operation on ``fields`` of records with ``ids``,
obtained by calling the 'store' functions of these fields,
        as set up by their 'store' attribute.
:return: [(priority, model_name, [record_ids,], [function_fields,])]
"""
if fields is None: fields = []
stored_functions = self.pool._store_function.get(self._name, [])
# use indexed names for the details of the stored_functions:
model_name_, func_field_to_compute_, id_mapping_fnct_, trigger_fields_, priority_ = range(5)
# only keep functions that should be triggered for the ``fields``
# being written to.
to_compute = [f for f in stored_functions \
if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]
mapping = {}
fresults = {}
for function in to_compute:
fid = id(function[id_mapping_fnct_])
if not fid in fresults:
# use admin user for accessing objects having rules defined on store fields
fresults[fid] = [id2 for id2 in function[id_mapping_fnct_](self, cr, SUPERUSER_ID, ids, context) if id2]
target_ids = fresults[fid]
# the compound key must consider the priority and model name
key = (function[priority_], function[model_name_])
for target_id in target_ids:
mapping.setdefault(key, {}).setdefault(target_id,set()).add(tuple(function))
# Here mapping looks like:
# { (10, 'model_a') : { target_id1: [ (function_1_tuple, function_2_tuple) ], ... }
# (20, 'model_a') : { target_id2: [ (function_3_tuple, function_4_tuple) ], ... }
# (99, 'model_a') : { target_id1: [ (function_5_tuple, function_6_tuple) ], ... }
# }
# Now we need to generate the batch function calls list
# call_map =
# { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
call_map = {}
for ((priority,model), id_map) in mapping.iteritems():
functions_ids_maps = {}
# function_ids_maps =
# { (function_1_tuple, function_2_tuple) : [target_id1, target_id2, ..] }
for fid, functions in id_map.iteritems():
functions_ids_maps.setdefault(tuple(functions), []).append(fid)
for functions, ids in functions_ids_maps.iteritems():
call_map.setdefault((priority,model),[]).append((priority, model, ids,
[f[func_field_to_compute_] for f in functions]))
ordered_keys = call_map.keys()
ordered_keys.sort()
result = []
if ordered_keys:
result = reduce(operator.add, (call_map[k] for k in ordered_keys))
return result
def _store_set_values(self, cr, uid, ids, fields, context):
"""Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
respecting ``multi`` attributes), and stores the resulting values in the database directly."""
if not ids:
return True
field_flag = False
field_dict = {}
if self._log_access:
cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[1]:
field_dict.setdefault(r[0], [])
res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
for i in self.pool._store_function.get(self._name, []):
if i[5]:
up_write_date = write_date + datetime.timedelta(hours=i[5])
if datetime.datetime.now() < up_write_date:
if i[1] in fields:
field_dict[r[0]].append(i[1])
if not field_flag:
field_flag = True
todo = {}
keys = []
for f in fields:
if self._columns[f]._multi not in keys:
keys.append(self._columns[f]._multi)
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key in keys:
val = todo[key]
if key:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
for id, value in result.items():
if field_flag:
for f in value.keys():
if f in field_dict[id]:
value.pop(f)
upd0 = []
upd1 = []
for v in value:
if v not in val:
continue
if self._columns[v]._type == 'many2one':
try:
value[v] = value[v][0]
except:
pass
upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
upd1.append(self._columns[v]._symbol_set[1](value[v]))
upd1.append(id)
if upd0 and upd1:
cr.execute('update "' + self._table + '" set ' + \
','.join(upd0) + ' where id = %s', upd1)
else:
for f in val:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
for r in result.keys():
if field_flag:
if r in field_dict.keys():
if f in field_dict[r]:
result.pop(r)
for id, value in result.items():
if self._columns[f]._type == 'many2one':
try:
value = value[0]
except:
pass
cr.execute('update "' + self._table + '" set ' + \
'"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
return True
#
# TODO: Validate
#
def perm_write(self, cr, user, ids, fields, context=None):
raise NotImplementedError(_('This method does not exist anymore'))
    # TODO: improve with NULL
def _where_calc(self, cr, user, domain, active_test=True, context=None):
"""Computes the WHERE clause needed to implement an OpenERP domain.
:param domain: the domain to compute
:type domain: list
:param active_test: whether the default filtering of records with ``active``
field set to ``False`` should be applied.
        :return: the query expressing the given domain
:rtype: osv.query.Query
"""
if not context:
context = {}
domain = domain[:]
# if the object has a field named 'active', filter out all inactive
        # records unless they were explicitly asked for
if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
if domain:
# the item[0] trick below works for domain items and '&'/'|'/'!'
# operators too
if not any(item[0] == 'active' for item in domain):
domain.insert(0, ('active', '=', 1))
else:
domain = [('active', '=', 1)]
if domain:
e = expression.expression(cr, user, domain, self, context)
tables = e.get_tables()
where_clause, where_params = e.to_sql()
where_clause = where_clause and [where_clause] or []
else:
where_clause, where_params, tables = [], [], ['"%s"' % self._table]
return Query(tables, where_clause, where_params)
def _check_qorder(self, word):
if not regex_order.match(word):
raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
return True
def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
"""Add what's missing in ``query`` to implement all appropriate ir.rules
           (rules of the current model and of its ``_inherits`` parents)
:param query: the current query object
"""
def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
""" :param string parent_model: string of the parent model
:param model child_object: model object, base of the rule application
"""
if added_clause:
if parent_model and child_object:
# as inherited rules are being applied, we need to add the missing JOIN
# to reach the parent table (if it was not JOINed yet in the query)
parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
# inherited rules are applied on the external table -> need to get the alias and replace
parent_table = self.pool.get(parent_model)._table
added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
# change references to parent_table to parent_alias, because we now use the alias to refer to the table
new_tables = []
for table in added_tables:
# table is just a table name -> switch to the full alias
if table == '"%s"' % parent_table:
new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
                        # table is already a full statement -> replace references to the table with its alias; this is correct given the way aliases are generated
else:
new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
added_tables = new_tables
query.where_clause += added_clause
query.where_clause_params += added_params
for table in added_tables:
if table not in query.tables:
query.tables.append(table)
return True
return False
# apply main rules on the object
rule_obj = self.pool.get('ir.rule')
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
# apply ir.rules from the parents (through _inherits)
for inherited_model in self._inherits:
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
parent_model=inherited_model, child_object=self)
def _generate_m2o_order_by(self, order_field, query):
"""
Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
either native m2o fields or function/related fields that are stored, including
intermediate JOINs for inheritance if required.
:return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
"""
if order_field not in self._columns and order_field in self._inherit_fields:
# also add missing joins for reaching the table containing the m2o field
qualified_field = self._inherits_join_calc(order_field, query)
order_field_column = self._inherit_fields[order_field][2]
else:
qualified_field = '"%s"."%s"' % (self._table, order_field)
order_field_column = self._columns[order_field]
assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
_logger.debug("Many2one function/related fields must be stored " \
"to be used as ordering fields! Ignoring sorting for %s.%s",
self._name, order_field)
return
# figure out the applicable order_by for the m2o
dest_model = self.pool.get(order_field_column._obj)
m2o_order = dest_model._order
if not regex_order.match(m2o_order):
# _order is complex, can't use it here, so we default to _rec_name
m2o_order = dest_model._rec_name
else:
# extract the field names, to be able to qualify them and add desc/asc
m2o_order_list = []
for order_part in m2o_order.split(","):
m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
m2o_order = m2o_order_list
# Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
# as we don't want to exclude results that have NULL values for the m2o
src_table, src_field = qualified_field.replace('"', '').split('.', 1)
dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
def _generate_order_by(self, order_spec, query):
"""
        Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
        a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
        :raise: except_orm in case order_spec is malformed
"""
order_by_clause = ''
order_spec = order_spec or self._order
if order_spec:
order_by_elements = []
self._check_qorder(order_spec)
for order_part in order_spec.split(','):
order_split = order_part.strip().split(' ')
order_field = order_split[0].strip()
order_direction = order_split[1].strip() if len(order_split) == 2 else ''
inner_clause = None
if order_field == 'id' or (self._log_access and order_field in LOG_ACCESS_COLUMNS.keys()):
order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
elif order_field in self._columns:
order_column = self._columns[order_field]
if order_column._classic_read:
inner_clause = '"%s"."%s"' % (self._table, order_field)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
elif order_field in self._inherit_fields:
parent_obj = self.pool.get(self._inherit_fields[order_field][3])
order_column = parent_obj._columns[order_field]
if order_column._classic_read:
inner_clause = self._inherits_join_calc(order_field, query)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
else:
raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
if inner_clause:
if isinstance(inner_clause, list):
for clause in inner_clause:
order_by_elements.append("%s %s" % (clause, order_direction))
else:
order_by_elements.append("%s %s" % (inner_clause, order_direction))
if order_by_elements:
order_by_clause = ",".join(order_by_elements)
return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
"""
        Private implementation of the search() method, allowing the caller to specify the uid to use for the access right check.
This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
This is ok at the security level because this method is private and not callable through XML-RPC.
:param access_rights_uid: optional user ID to use when checking access rights
(not for ir.rules, this is only for ir.model.access)
"""
if context is None:
context = {}
self.check_access_rights(cr, access_rights_uid or user, 'read')
        # For transient models, restrict access to the current user, except for the super-user
if self.is_transient() and self._log_access and user != SUPERUSER_ID:
args = expression.AND(([('create_uid', '=', user)], args or []))
query = self._where_calc(cr, user, args, context=context)
self._apply_ir_rules(cr, user, query, 'read', context=context)
order_by = self._generate_order_by(order, query)
from_clause, where_clause, where_clause_params = query.get_sql()
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
where_str = where_clause and (" WHERE %s" % where_clause) or ''
if count:
cr.execute('SELECT count("%s".id) FROM ' % self._table + from_clause + where_str + limit_str + offset_str, where_clause_params)
res = cr.fetchall()
return res[0][0]
cr.execute('SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str, where_clause_params)
res = cr.fetchall()
# TDE note: with auto_join, we could have several lines about the same result
# i.e. a lead with several unread messages; we uniquify the result using
# a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
def _uniquify_list(seq):
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
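        # For example (illustrative): _uniquify_list([3, 1, 3, 2, 1]) -> [3, 1, 2],
        # keeping only the first occurrence of each id while preserving order.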
return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
if not args:
args = []
if field in self._inherit_fields:
return self.pool.get(self._inherit_fields[field][0]).distinct_field_get(cr, uid, field, value, args, offset, limit)
else:
return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
def copy_data(self, cr, uid, id, default=None, context=None):
"""
Copy given record's data with all its fields values
:param cr: database cursor
:param uid: current user id
:param id: id of the record to copy
:param default: field values to override in the original values of the copied record
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
:return: dictionary containing all the field values
"""
if context is None:
context = {}
# avoid recursion through already copied records in case of circular relationship
seen_map = context.setdefault('__copy_data_seen',{})
if id in seen_map.setdefault(self._name,[]):
return
seen_map[self._name].append(id)
if default is None:
default = {}
if 'state' not in default:
if 'state' in self._defaults:
if callable(self._defaults['state']):
default['state'] = self._defaults['state'](self, cr, uid, context)
else:
default['state'] = self._defaults['state']
context_wo_lang = context.copy()
if 'lang' in context:
del context_wo_lang['lang']
data = self.read(cr, uid, [id,], context=context_wo_lang)
if data:
data = data[0]
else:
            raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
# build a black list of fields that should not be copied
blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
def blacklist_given_fields(obj):
# blacklist the fields that are given by inheritance
for other, field_to_other in obj._inherits.items():
blacklist.add(field_to_other)
if field_to_other in default:
# all the fields of 'other' are given by the record: default[field_to_other],
# except the ones redefined in self
blacklist.update(set(self.pool.get(other)._all_columns) - set(self._columns))
else:
blacklist_given_fields(self.pool.get(other))
blacklist_given_fields(self)
res = dict(default)
for f, colinfo in self._all_columns.items():
field = colinfo.column
if f in default:
pass
elif f in blacklist:
pass
elif isinstance(field, fields.function):
pass
elif field._type == 'many2one':
res[f] = data[f] and data[f][0]
elif field._type == 'one2many':
other = self.pool.get(field._obj)
# duplicate following the order of the ids because we'll rely on
                # it later for copying translations in copy_translations()!
lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
# the lines are duplicated using the wrong (old) parent, but then
# are reassigned to the correct one thanks to the (0, 0, ...)
res[f] = [(0, 0, line) for line in lines if line]
elif field._type == 'many2many':
res[f] = [(6, 0, data[f])]
else:
res[f] = data[f]
return res
def copy_translations(self, cr, uid, old_id, new_id, context=None):
if context is None:
context = {}
# avoid recursion through already copied records in case of circular relationship
seen_map = context.setdefault('__copy_translations_seen',{})
if old_id in seen_map.setdefault(self._name,[]):
return
seen_map[self._name].append(old_id)
trans_obj = self.pool.get('ir.translation')
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fields = self.fields_get(cr, uid, context=context)
for field_name, field_def in fields.items():
# we must recursively copy the translations for o2o and o2m
if field_def['type'] == 'one2many':
target_obj = self.pool.get(field_def['relation'])
old_record, new_record = self.read(cr, uid, [old_id, new_id], [field_name], context=context)
# here we rely on the order of the ids to match the translations
# as foreseen in copy_data()
old_children = sorted(old_record[field_name])
new_children = sorted(new_record[field_name])
for (old_child, new_child) in zip(old_children, new_children):
target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
# and for translatable fields we keep them for copy
elif field_def.get('translate'):
if field_name in self._columns:
trans_name = self._name + "," + field_name
res_id = new_id
elif field_name in self._inherit_fields:
trans_name = self._inherit_fields[field_name][0] + "," + field_name
# get the id of the parent record to set the translation
inherit_field_name = self._inherit_fields[field_name][1]
res_id = self.read(cr, uid, [new_id], [inherit_field_name], context=context)[0][inherit_field_name][0]
else:
continue
trans_ids = trans_obj.search(cr, uid, [
('name', '=', trans_name),
('res_id', '=', old_id)
])
records = trans_obj.read(cr, uid, trans_ids, context=context)
for record in records:
del record['id']
# remove source to avoid triggering _set_src
del record['source']
record.update({'res_id': res_id})
trans_obj.create(cr, uid, record, context=context)
def copy(self, cr, uid, id, default=None, context=None):
"""
Duplicate record with given id updating it with default values
:param cr: database cursor
:param uid: current user id
:param id: id of the record to copy
        :param default: dictionary of field values to override in the original values of the copied record, e.g: ``{'field_name': overridden_value, ...}``
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
:return: id of the newly created record
"""
if context is None:
context = {}
context = context.copy()
data = self.copy_data(cr, uid, id, default, context)
new_id = self.create(cr, uid, data, context)
self.copy_translations(cr, uid, id, new_id, context)
return new_id
def exists(self, cr, uid, ids, context=None):
"""Checks whether the given id or ids exist in this model,
and return the list of ids that do. This is simple to use for
a truth test on a browse_record::
if record.exists():
pass
:param ids: id or list of ids to check for existence
:type ids: int or [int]
:return: the list of ids that currently exist, out of
the given `ids`
"""
if type(ids) in (int, long):
ids = [ids]
query = 'SELECT id FROM "%s"' % self._table
cr.execute(query + "WHERE ID IN %s", (tuple(ids),))
return [x[0] for x in cr.fetchall()]
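    # Usage sketch (added comment; assumes a model registered as 'res.partner'
    # where ids 1 and 2 exist and 999 has been deleted):
    #   self.pool.get('res.partner').exists(cr, uid, [1, 2, 999])  ->  [1, 2]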
def check_recursion(self, cr, uid, ids, context=None, parent=None):
_logger.warning("You are using deprecated %s.check_recursion(). Please use the '_check_recursion()' instead!" % \
self._name)
assert parent is None or parent in self._columns or parent in self._inherit_fields,\
"The 'parent' parameter passed to check_recursion() must be None or a valid field name"
return self._check_recursion(cr, uid, ids, context, parent)
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param parent: optional parent field name (default: ``self._parent_name = parent_id``)
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
if not parent:
parent = self._parent_name
# must ignore 'active' flag, ir.rules, etc. => direct SQL query
query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
for id in ids:
current_id = id
while current_id is not None:
cr.execute(query, (current_id,))
result = cr.fetchone()
current_id = result[0] if result else None
if current_id == id:
return False
return True
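    # Illustrative example (hypothetical data): if record 2 has parent 3 and
    # record 3 has parent 2, _check_recursion(cr, uid, [2]) detects the cycle
    # and returns False; with an acyclic parent chain it returns True.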
def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID(s) of any database record.
        **Synopsis**: ``_get_external_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
:return: map of ids to the list of their fully qualified External IDs
in the form ``module.key``, or an empty list when there's no External
ID for a record, e.g.::
{ 'id': ['module.ext_id', 'module.ext_id_bis'],
'id2': [] }
"""
ir_model_data = self.pool.get('ir.model.data')
data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
result = {}
for id in ids:
# can't use dict.fromkeys() as the list would be shared!
result[id] = []
for record in data_results:
result[record['res_id']].append('%(module)s.%(name)s' % record)
return result
def get_external_id(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID of any database record, if there
is one. This method works as a possible implementation
for a function field, to be able to add it to any
model object easily, referencing it as ``Model.get_external_id``.
When multiple External IDs exist for a record, only one
of them is returned (randomly).
:return: map of ids to their fully qualified XML ID,
defaulting to an empty string when there's none
(to be usable as a function field),
e.g.::
{ 'id': 'module.ext_id',
'id2': '' }
"""
results = self._get_xml_ids(cr, uid, ids)
for k, v in results.iteritems():
if results[k]:
results[k] = v[0]
else:
results[k] = ''
return results
# backwards compatibility
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
# Transience
def is_transient(self):
""" Return whether the model is transient.
See :class:`TransientModel`.
"""
return self._transient
def _transient_clean_rows_older_than(self, cr, seconds):
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
# Never delete rows used in last 5 minutes
seconds = max(seconds, 300)
query = ("SELECT id FROM " + self._table + " WHERE"
" COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
" < ((now() at time zone 'UTC') - interval %s)")
cr.execute(query, ("%s seconds" % seconds,))
ids = [x[0] for x in cr.fetchall()]
self.unlink(cr, SUPERUSER_ID, ids)
def _transient_clean_old_rows(self, cr, max_count):
# Check how many rows we have in the table
cr.execute("SELECT count(*) AS row_count FROM " + self._table)
res = cr.fetchall()
if res[0][0] <= max_count:
return # max not reached, nothing to do
self._transient_clean_rows_older_than(cr, 300)
def _transient_vacuum(self, cr, uid, force=False):
"""Clean the transient records.
This unlinks old records from the transient model tables whenever the
"_transient_max_count" or "_max_age" conditions (if any) are reached.
Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever
a new record is created).
Example with both max_hours and max_count active:
Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
- age based vacuum will leave the 22 rows created/changed in the last 12 minutes
- count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
would immediately cause the maximum to be reached again.
- the 10 rows that have been created/changed the last 5 minutes will NOT be deleted
"""
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
_transient_check_time = 20 # arbitrary limit on vacuum executions
self._transient_check_count += 1
if not force and (self._transient_check_count < _transient_check_time):
return True # no vacuum cleaning this time
self._transient_check_count = 0
# Age-based expiration
if self._transient_max_hours:
self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
# Count-based expiration
if self._transient_max_count:
self._transient_clean_old_rows(cr, self._transient_max_count)
return True
def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
""" Serializes one2many and many2many commands into record dictionaries
(as if all the records came from the database via a read()). This
method is aimed at onchange methods on one2many and many2many fields.
Because commands might be creation commands, not all record dicts
will contain an ``id`` field. Commands matching an existing record
will have an ``id``.
:param field_name: name of the one2many or many2many field matching the commands
:type field_name: str
:param commands: one2many or many2many commands to execute on ``field_name``
:type commands: list((int|False, int|False, dict|False))
:param fields: list of fields to read from the database, when applicable
:type fields: list(str)
:returns: records in a shape similar to that returned by ``read()``
(except records may be missing the ``id`` field if they don't exist in db)
:rtype: list(dict)
"""
result = [] # result (list of dict)
record_ids = [] # ids of records to read
updates = {} # {id: dict} of updates on particular records
for command in commands:
if not isinstance(command, (list, tuple)):
record_ids.append(command)
elif command[0] == 0:
result.append(command[2])
elif command[0] == 1:
record_ids.append(command[1])
updates.setdefault(command[1], {}).update(command[2])
elif command[0] in (2, 3):
record_ids = [id for id in record_ids if id != command[1]]
elif command[0] == 4:
record_ids.append(command[1])
elif command[0] == 5:
result, record_ids = [], []
elif command[0] == 6:
result, record_ids = [], list(command[2])
# read the records and apply the updates
other_model = self.pool.get(self._all_columns[field_name].column._obj)
for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
record.update(updates.get(record['id'], {}))
result.append(record)
return result
# for backward compatibility
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
def _register_hook(self, cr):
""" stuff to do right after the registry is built """
pass
# keep this import here, at top it will cause dependency cycle errors
import expression
class Model(BaseModel):
"""Main super-class for regular database-persisted OpenERP models.
OpenERP models are created by inheriting from this class::
class user(Model):
...
The system will later instantiate the class once per database (on
which the class' module is installed).
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False # True in a TransientModel
class TransientModel(BaseModel):
"""Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.
    A TransientModel has simplified access rights management:
all users can create new records, and may only access the
records they created. The super-user has unrestricted access
to all TransientModel records.
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = True
class AbstractModel(BaseModel):
"""Abstract Model super-class for creating an abstract class meant to be
inherited by regular models (Models or TransientModels) but not meant to
be usable on its own, or persisted.
Technical note: we don't want to make AbstractModel the super-class of
Model or BaseModel because it would not make sense to put the main
definition of persistence methods such as create() in it, and still we
should be able to override them within an AbstractModel.
"""
_auto = False # don't create any database backend for AbstractModels
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False
def itemgetter_tuple(items):
""" Fixes itemgetter inconsistency (useful in some cases) of not returning
a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
"""
if len(items) == 0:
return lambda a: ()
if len(items) == 1:
return lambda gettable: (gettable[items[0]],)
return operator.itemgetter(*items)
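# Illustrative examples for itemgetter_tuple (added comment; hypothetical inputs):
#   itemgetter_tuple([])({'a': 1})             -> ()
#   itemgetter_tuple(['a'])({'a': 1})          -> (1,)
#   itemgetter_tuple([0, 2])(('x', 'y', 'z'))  -> ('x', 'z')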
class ImportWarning(Warning):
""" Used to send warnings upwards the stack during the import process
"""
pass
def convert_pgerror_23502(model, fields, info, e):
m = re.match(r'^null value in column "(?P<field>\w+)" violates '
r'not-null constraint\n',
str(e))
    # check the match before extracting the field name to avoid an AttributeError
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
message = _(u"Missing required value for the field '%s'.") % field_name
field = fields.get(field_name)
if field:
message = _(u"%s This might be '%s' in the current model, or a field "
u"of the same name in an o2m.") % (message, field['string'])
return {
'message': message,
'field': field_name,
}
def convert_pgerror_23505(model, fields, info, e):
m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
str(e))
    # check the match before extracting the field name to avoid an AttributeError
    if not m or m.group('field') not in fields:
        return {'message': unicode(e)}
    field_name = m.group('field')
message = _(u"The value for the field '%s' already exists.") % field_name
field = fields.get(field_name)
if field:
message = _(u"%s This might be '%s' in the current model, or a field "
u"of the same name in an o2m.") % (message, field['string'])
return {
'message': message,
'field': field_name,
}
PGERROR_TO_OE = collections.defaultdict(
# shape of mapped converters
lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
# not_null_violation
'23502': convert_pgerror_23502,
# unique constraint error
'23505': convert_pgerror_23505,
})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| xxshutong/openerp-7.0 | openerp/osv/orm.py | Python | agpl-3.0 | 265,245 |
import unittest
from labonneboite.common import locations
class CityLocationTest(unittest.TestCase):
def test_hyphenated_city_name(self):
city = locations.CityLocation('19100', 'brive-la-gaillarde')
self.assertEqual(city.name, 'Brive-la-Gaillarde')
def test_unicode_city_name(self):
city = locations.CityLocation('05100', 'briancon')
self.assertEqual(city.name, 'Briançon')
def test_no_slug(self):
city = locations.CityLocation('05100')
self.assertEqual(city.name, 'Briançon')
def test_accented_city_name(self):
city = locations.CityLocation('05100', 'Cervières')
self.assertEqual(city.name, 'Cervières')
self.assertEqual(6.756570896485574, city.location.longitude)
self.assertEqual(44.86053112144938, city.location.latitude)
| StartupsPoleEmploi/labonneboite | labonneboite/tests/app/test_locations.py | Python | agpl-3.0 | 833 |
'''
Driver file
To do:
- allow searching of a greater number of common players
- "vanity search" - highest damage differential:
- incorporate STEAMID conversion (shouldn't require the API)
- impose order on results
- start to store player aliases, associated with IDs
- write test
Tools:
Couple of urls:
http://logs.tf/profile/76561198055233348
http://logs.tf/profile/76561197993593754
'''
import player
# constants:
ID64_LENGTH = 17
MAX_PLAYERS = 12
# print title and link for logs common to players
def print_common_logs(players):
num_of_players = len(players)
if num_of_players > MAX_PLAYERS:
raise RuntimeError("Too many players for now!")
elif num_of_players == 0:
print("No players, no logs.")
else:
for key in players[0].data['logs']:
players_with_key = 1
for i in range(1, num_of_players):
if key in players[i].data['logs']:
players_with_key += 1
if players_with_key == num_of_players:
                    # the key is common to every player, so read it from players[0]
                    # ('i' is not defined when there is only one player)
                    print(players[0].data['logs'][key]['title'])
                    print("http://logs.tf/" + str(players[0].data['logs'][key]['id']))
def main():
print("Hi! This script finds urls of logs common to up to 12 players.")
print("Enter their logs.tf profiles, then enter 's' to search.")
players = []
steam_id_64 = 0
# get and validate user input for ID
while True:
url_input = input("Enter player URL: ")
if url_input == 's' or url_input == 'S':
break
        # the 17-digit id64 follows "/profile/" in the URL:
id_index = url_input.find("/profile/")
if id_index == -1:
print("Input not recognized. Please try again.")
continue
            # get the ID64_LENGTH digits following "/profile/"
            steam_id_64 = url_input[id_index + 9: id_index + 9 + ID64_LENGTH]
            if len(steam_id_64) != ID64_LENGTH:
print("Input not recognized. Please try again.")
continue
else:
p = player.Player(steam_id_64)
players.append(p)
print_common_logs(players)
if __name__ == "__main__":
main()
| triested/logs-search | main.py | Python | agpl-3.0 | 2,193 |
"""
Copyright (c) 2002 Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
EXPORT LAWS: THIS LICENSE ADDS NO RESTRICTIONS TO THE EXPORT LAWS OF
YOUR JURISDICTION. It is licensee's responsibility to comply with any
export regulations applicable in licensee's jurisdiction. Under
CURRENT (May 2000) U.S. export regulations this software is eligible
for export from the U.S. and can be downloaded by or otherwise
exported or reexported worldwide EXCEPT to U.S. embargoed destinations
which include Cuba, Iraq, Libya, North Korea, Iran, Syria, Sudan,
Afghanistan and any other country to which the U.S. has embargoed
goods and services.
DESCRIPTION: Agent HTTP thread that handles HTTP requests to view the
agent's current state.
AUTHOR: Brent Chun ([email protected])
$Id: agenthttpsvr.py,v 1.1 2003-08-19 17:17:19 aclement Exp $
"""
import SimpleHTTPServer
import SocketServer
import threading
import agent
class agenthttphandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write("%s" % agent.agenthtml(self.server.agent))
class agenthttpsvr(SocketServer.ThreadingTCPServer):
def __init__(self, server_address, RequestHandlerClass, agent):
import hacks
SocketServer.ThreadingTCPServer.allow_reuse_address = 1
try:
method = SocketServer.ThreadingTCPServer.__init__
args = [ self, server_address, RequestHandlerClass ]
hacks.retryapply(method, args, 10, 1)
except:
raise "Could not bind to TCP port %d" % server_address[1]
self.agent = agent
class agenthttpsvrthr(threading.Thread):
def __init__(self, agent):
threading.Thread.__init__(self)
self.server = agenthttpsvr(("", agent.conf.port), agenthttphandler, agent)
def run(self):
self.server.serve_forever()
| nmc-probe/emulab-nome | tbsetup/plab/libdslice/dslice/agenthttpsvr.py | Python | agpl-3.0 | 3,426 |
#
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from pixelated.support.encrypted_file_storage import EncryptedFileStorage
import os
import re
import dateutil.parser
from pixelated.adapter.model.status import Status
from pixelated.adapter.search.contacts import contacts_suggestions
from whoosh.index import FileIndex
from whoosh.fields import Schema, ID, KEYWORD, TEXT, NUMERIC
from whoosh.qparser import QueryParser
from whoosh.qparser import MultifieldParser
from whoosh.writing import AsyncWriter
from whoosh import sorting
from pixelated.support.functional import unique
import traceback
class SearchEngine(object):
DEFAULT_INDEX_HOME = os.path.join(os.environ['HOME'], '.leap')
DEFAULT_TAGS = ['inbox', 'sent', 'drafts', 'trash']
def __init__(self, key, agent_home=DEFAULT_INDEX_HOME):
self.key = key
self.index_folder = os.path.join(agent_home, 'search_index')
if not os.path.exists(self.index_folder):
os.makedirs(self.index_folder)
self._index = self._create_index()
def _add_to_tags(self, tags, group, skip_default_tags, count_type, query=None):
query_matcher = re.compile(".*%s.*" % query.lower()) if query else re.compile(".*")
for tag, count in group.iteritems():
if skip_default_tags and tag in self.DEFAULT_TAGS or not query_matcher.match(tag):
continue
if not tags.get(tag):
tags[tag] = {'ident': tag, 'name': tag, 'default': False, 'counts': {'total': 0, 'read': 0},
'mails': []}
tags[tag]['counts'][count_type] += count
def _search_tag_groups(self, is_filtering_tags):
seen = None
query_parser = QueryParser('tag', self._index.schema)
options = {'limit': None, 'groupedby': sorting.FieldFacet('tag', allow_overlap=True), 'maptype': sorting.Count}
with self._index.searcher() as searcher:
total = searcher.search(query_parser.parse('*'), **options).groups()
if not is_filtering_tags:
seen = searcher.search(query_parser.parse("* AND flags:%s" % Status.SEEN), **options).groups()
return seen, total
def _init_tags_defaults(self):
tags = {}
for default_tag in self.DEFAULT_TAGS:
tags[default_tag] = {
'ident': default_tag,
'name': default_tag,
'default': True,
'counts': {
'total': 0,
'read': 0
},
'mails': []
}
return tags
def _build_tags(self, seen, total, skip_default_tags, query):
tags = {}
if not skip_default_tags:
tags = self._init_tags_defaults()
self._add_to_tags(tags, total, skip_default_tags, count_type='total', query=query)
if seen:
self._add_to_tags(tags, seen, skip_default_tags, count_type='read')
return tags.values()
def tags(self, query, skip_default_tags):
is_filtering_tags = True if query else False
seen, total = self._search_tag_groups(is_filtering_tags=is_filtering_tags)
return self._build_tags(seen, total, skip_default_tags, query)
def _mail_schema(self):
return Schema(
ident=ID(stored=True, unique=True),
sender=ID(stored=False),
to=KEYWORD(stored=False, commas=True),
cc=KEYWORD(stored=False, commas=True),
bcc=KEYWORD(stored=False, commas=True),
subject=TEXT(stored=False),
date=NUMERIC(stored=False, sortable=True, bits=64, signed=False),
body=TEXT(stored=False),
tag=KEYWORD(stored=True, commas=True),
flags=KEYWORD(stored=True, commas=True),
raw=TEXT(stored=False))
def _create_index(self):
storage = EncryptedFileStorage(self.index_folder, self.key)
return FileIndex.create(storage, self._mail_schema(), indexname='mails')
def index_mail(self, mail):
with AsyncWriter(self._index) as writer:
self._index_mail(writer, mail)
def _index_mail(self, writer, mail):
mdict = mail.as_dict()
header = mdict['header']
tags = set(mdict.get('tags', {}))
tags.add(mail.mailbox_name.lower())
index_data = {
'sender': self._empty_string_to_none(header.get('from', '')),
'subject': self._empty_string_to_none(header.get('subject', '')),
'date': dateutil.parser.parse(header.get('date', '')).strftime('%s'),
'to': self._format_recipient(header, 'to'),
'cc': self._format_recipient(header, 'cc'),
'bcc': self._format_recipient(header, 'bcc'),
'tag': u','.join(unique(tags)),
'body': unicode(mdict['textPlainBody'] if 'textPlainBody' in mdict else mdict['body']),
'ident': unicode(mdict['ident']),
'flags': unicode(','.join(unique(mail.flags))),
'raw': unicode(mail.raw)
}
writer.update_document(**index_data)
def _format_recipient(self, headers, name):
list = headers.get(name, [''])
return u','.join(list) if list else u''
def _empty_string_to_none(self, field_value):
if not field_value:
return None
else:
return field_value
def index_mails(self, mails, callback=None):
try:
with AsyncWriter(self._index) as writer:
for mail in mails:
self._index_mail(writer, mail)
if callback:
callback()
except Exception, e:
traceback.print_exc(e)
raise
def _search_with_options(self, options, query):
with self._index.searcher() as searcher:
query = QueryParser('raw', self._index.schema).parse(query)
results = searcher.search(query, **options)
return results
def search(self, query, window=25, page=1, all_mails=False):
query = self.prepare_query(query)
return self._search_all_mails(query) if all_mails else self._paginated_search_mails(query, window, page)
def _search_all_mails(self, query):
with self._index.searcher() as searcher:
sorting_facet = sorting.FieldFacet('date', reverse=True)
results = searcher.search(query, sortedby=sorting_facet, reverse=True, limit=None)
return unique([mail['ident'] for mail in results])
def _paginated_search_mails(self, query, window, page):
page = int(page) if page is not None and int(page) > 1 else 1
window = int(window) if window is not None else 25
with self._index.searcher() as searcher:
tags_facet = sorting.FieldFacet('tag', allow_overlap=True, maptype=sorting.Count)
sorting_facet = sorting.FieldFacet('date', reverse=True)
results = searcher.search_page(query, page, pagelen=window, groupedby=tags_facet, sortedby=sorting_facet)
return unique([mail['ident'] for mail in results]), sum(results.results.groups().values())
def prepare_query(self, query):
query = (
query
.replace('-in:', 'AND NOT tag:')
.replace('in:all', '*')
)
return MultifieldParser(['raw', 'body'], self._index.schema).parse(query)
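    # Illustrative example (hypothetical query string): prepare_query turns
    # 'in:all -in:trash' into '* AND NOT tag:trash' before handing it to the
    # MultifieldParser.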
def remove_from_index(self, mail_id):
with AsyncWriter(self._index) as writer:
writer.delete_by_term('ident', mail_id)
def contacts(self, query):
with self._index.searcher() as searcher:
return contacts_suggestions(query, searcher)
| kaeff/pixelated-user-agent | service/pixelated/adapter/search/__init__.py | Python | agpl-3.0 | 8,292 |
# Generated by Django 1.9.11 on 2017-02-24 09:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('publisher_comments', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='comments',
name='comment_type',
field=models.CharField(blank=True, choices=[('default', 'Default'), ('decline_preview', 'Decline Preview')], default='default', max_length=255, null=True),
),
]
| edx/course-discovery | course_discovery/apps/publisher_comments/migrations/0002_comments_comment_type.py | Python | agpl-3.0 | 511 |
"""
Useful utilities for management commands.
"""
from django.core.management.base import CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from six import text_type
def get_mutually_exclusive_required_option(options, *selections):
"""
Validates that exactly one of the 2 given options is specified.
Returns the name of the found option.
"""
selected = [sel for sel in selections if options.get(sel)]
if len(selected) != 1:
selection_string = u', '.join('--{}'.format(selection) for selection in selections)
raise CommandError(u'Must specify exactly one of {}'.format(selection_string))
return selected[0]
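# Usage sketch (hypothetical options dict):
#   get_mutually_exclusive_required_option({'course': 'x', 'all': None}, 'course', 'all')
#   returns 'course'; specifying both or neither raises CommandError.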
def validate_mutually_exclusive_option(options, option_1, option_2):
"""
Validates that both of the 2 given options are not specified.
"""
if options.get(option_1) and options.get(option_2):
raise CommandError(u'Both --{} and --{} cannot be specified.'.format(option_1, option_2))
def validate_dependent_option(options, dependent_option, depending_on_option):
"""
Validates that option_1 is specified if dependent_option is specified.
"""
if options.get(dependent_option) and not options.get(depending_on_option):
raise CommandError(u'Option --{} requires option --{}.'.format(dependent_option, depending_on_option))
def parse_course_keys(course_key_strings):
"""
Parses and returns a list of CourseKey objects from the given
list of course key strings.
"""
try:
return [CourseKey.from_string(course_key_string) for course_key_string in course_key_strings]
except InvalidKeyError as error:
raise CommandError(u'Invalid key specified: {}'.format(text_type(error))) # lint-amnesty, pylint: disable=raise-missing-from
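# Example (illustrative course key): parse_course_keys(['course-v1:edX+DemoX+Demo_Course'])
# returns a one-element list of CourseKey objects; a malformed string raises CommandError.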
| stvstnfrd/edx-platform | openedx/core/lib/command_utils.py | Python | agpl-3.0 | 1,812 |
"""
Acceptance tests for the teams feature.
"""
import json
import random
import time
from dateutil.parser import parse
import ddt
from nose.plugins.attrib import attr
from selenium.common.exceptions import TimeoutException
from uuid import uuid4
from common.test.acceptance.tests.helpers import get_modal_alert, EventsTestMixin, UniqueCourseTest
from common.test.acceptance.fixtures import LMS_BASE_URL
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.fixtures.discussion import (
Thread,
MultipleThreadFixture,
ForumsConfigMixin,
)
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.teams import (
TeamsPage,
MyTeamsPage,
BrowseTopicsPage,
BrowseTeamsPage,
TeamManagementPage,
EditMembershipPage,
TeamPage
)
from common.test.acceptance.pages.common.utils import confirm_prompt
TOPICS_PER_PAGE = 12
class TeamsTabBase(EventsTestMixin, ForumsConfigMixin, UniqueCourseTest):
"""Base class for Teams Tab tests"""
def setUp(self):
super(TeamsTabBase, self).setUp()
self.tab_nav = TabNavPage(self.browser)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.teams_page = TeamsPage(self.browser, self.course_id)
# TODO: Refactor so resetting events database is not necessary
self.reset_event_tracking()
self.enable_forums()
def create_topics(self, num_topics):
"""Create `num_topics` test topics."""
return [{u"description": i, u"name": i, u"id": i} for i in map(str, xrange(num_topics))]
def create_teams(self, topic, num_teams, time_between_creation=0):
"""Create `num_teams` teams belonging to `topic`."""
teams = []
for i in xrange(num_teams):
team = {
'course_id': self.course_id,
'topic_id': topic['id'],
'name': 'Team {}'.format(i),
'description': 'Description {}'.format(i),
'language': 'aa',
'country': 'AF'
}
teams.append(self.post_team_data(team))
# Sadly, this sleep is necessary in order to ensure that
# sorting by last_activity_at works correctly when running
# in Jenkins.
# THIS IS AN ANTI-PATTERN - DO NOT COPY.
time.sleep(time_between_creation)
return teams
def post_team_data(self, team_data):
"""Given a JSON representation of a team, post it to the server."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/teams/',
data=json.dumps(team_data),
headers=self.course_fixture.headers
)
self.assertEqual(response.status_code, 200)
return json.loads(response.text)
def create_memberships(self, num_memberships, team_id):
"""Create `num_memberships` users and assign them to `team_id`. The
last user created becomes the current user."""
memberships = []
for __ in xrange(num_memberships):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
memberships.append(user_info)
self.create_membership(user_info['username'], team_id)
#pylint: disable=attribute-defined-outside-init
self.user_info = memberships[-1]
return memberships
def create_membership(self, username, team_id):
"""Assign `username` to `team_id`."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/team_membership/',
data=json.dumps({'username': username, 'team_id': team_id}),
headers=self.course_fixture.headers
)
return json.loads(response.text)
def set_team_configuration(self, configuration, enroll_in_course=True, global_staff=False):
"""
Sets team configuration on the course and calls auto-auth on the user.
"""
#pylint: disable=attribute-defined-outside-init
self.course_fixture = CourseFixture(**self.course_info)
if configuration:
self.course_fixture.add_advanced_settings(
{u"teams_configuration": {u"value": configuration}}
)
self.course_fixture.install()
enroll_course_id = self.course_id if enroll_in_course else None
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, course_id=enroll_course_id, staff=global_staff).visit().user_info
self.course_info_page.visit()
def verify_teams_present(self, present):
"""
Verifies whether or not the teams tab is present. If it should be present, also
checks the text on the page (to ensure view is working).
"""
if present:
self.assertIn("Teams", self.tab_nav.tab_names)
self.teams_page.visit()
self.assertEqual(self.teams_page.active_tab(), 'browse')
else:
self.assertNotIn("Teams", self.tab_nav.tab_names)
def verify_teams(self, page, expected_teams):
"""Verify that the list of team cards on the current page match the expected teams in order."""
def assert_team_equal(expected_team, team_card_name, team_card_description):
"""
Helper to assert that a single team card has the expected name and
description.
"""
self.assertEqual(expected_team['name'], team_card_name)
self.assertEqual(expected_team['description'], team_card_description)
team_card_names = page.team_names
team_card_descriptions = page.team_descriptions
map(assert_team_equal, expected_teams, team_card_names, team_card_descriptions)
def verify_my_team_count(self, expected_number_of_teams):
""" Verify the number of teams shown on "My Team". """
# We are doing these operations on this top-level page object to avoid reloading the page.
self.teams_page.verify_my_team_count(expected_number_of_teams)
def only_team_events(self, event):
"""Filter out all non-team events."""
return event['event_type'].startswith('edx.team.')
@ddt.ddt
@attr(shard=5)
class TeamsTabTest(TeamsTabBase):
"""
Tests verifying when the Teams tab is present.
"""
def test_teams_not_enabled(self):
"""
Scenario: teams tab should not be present if no team configuration is set
Given I am enrolled in a course without team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(None)
self.verify_teams_present(False)
def test_teams_not_enabled_no_topics(self):
"""
Scenario: teams tab should not be present if team configuration does not specify topics
Given I am enrolled in a course with no topics in the team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": []})
self.verify_teams_present(False)
def test_teams_not_enabled_not_enrolled(self):
"""
Scenario: teams tab should not be present if student is not enrolled in the course
Given there is a course with team configuration and topics
And I am not enrolled in that course, and am not global staff
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False
)
self.verify_teams_present(False)
def test_teams_enabled(self):
"""
Scenario: teams tab should be present if user is enrolled in the course and it has team configuration
Given I am enrolled in a course with team configuration and topics
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(1)})
self.verify_teams_present(True)
def test_teams_enabled_global_staff(self):
"""
Scenario: teams tab should be present if user is not enrolled in the course, but is global staff
Given there is a course with team configuration
And I am not enrolled in that course, but am global staff
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False,
global_staff=True
)
self.verify_teams_present(True)
@ddt.data(
'topics/{topic_id}',
'topics/{topic_id}/search',
'teams/{topic_id}/{team_id}/edit-team',
'teams/{topic_id}/{team_id}'
)
def test_unauthorized_error_message(self, route):
"""Ensure that an error message is shown to the user if they attempt
to take an action which makes an AJAX request while not signed
in.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration(
{u'max_team_size': 10, u'topics': topics},
global_staff=True
)
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
self.browser.delete_cookie('sessionid')
url = self.browser.current_url.split('#')[0]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
)
)
)
self.teams_page.wait_for_ajax()
self.assertEqual(
self.teams_page.warning_message,
u"Your request could not be completed. Reload the page and try again."
)
@ddt.data(
('browse', '.topics-list'),
# TODO: find a reliable way to match the "My Teams" tab
# ('my-teams', 'div.teams-list'),
('teams/{topic_id}/{team_id}', 'div.discussion-module'),
('topics/{topic_id}/create-team', 'div.create-team-instructions'),
('topics/{topic_id}', '.teams-list'),
('not-a-real-route', 'div.warning')
)
@ddt.unpack
def test_url_routing(self, route, selector):
"""Ensure that navigating to a URL route correctly updates the page
content.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration({
u'max_team_size': 10,
u'topics': topics
})
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
# Get the base URL (the URL without any trailing fragment)
url = self.browser.current_url
fragment_index = url.find('#')
if fragment_index >= 0:
url = url[0:fragment_index]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
))
)
self.teams_page.wait_for_page()
self.teams_page.wait_for_ajax()
self.assertTrue(self.teams_page.q(css=selector).present)
self.assertTrue(self.teams_page.q(css=selector).visible)
@attr(shard=5)
class MyTeamsTest(TeamsTabBase):
"""
Tests for the "My Teams" tab of the Teams page.
"""
def setUp(self):
super(MyTeamsTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.my_teams_page = MyTeamsPage(self.browser, self.course_id)
self.page_viewed_event = {
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'my-teams',
'topic_id': None,
'team_id': None
}
}
def test_not_member_of_any_teams(self):
"""
Scenario: Visiting the My Teams page when user is not a member of any team should not display any teams.
Given I am enrolled in a course with a team configuration and a topic but am not a member of a team
When I visit the My Teams page
And I should see no teams
And I should see a message that I belong to no teams.
"""
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.assertEqual(len(self.my_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertEqual(
self.my_teams_page.q(css='.page-content-main').text,
[u'You are not currently a member of any team.']
)
def test_member_of_a_team(self):
"""
Scenario: Visiting the My Teams page when user is a member of a team should display the teams.
Given I am enrolled in a course with a team configuration and a topic and am a member of a team
When I visit the My Teams page
Then I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should not see a pagination footer
"""
teams = self.create_teams(self.topic, 1)
self.create_membership(self.user_info['username'], teams[0]['id'])
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.verify_teams(self.my_teams_page, teams)
def test_multiple_team_members(self):
"""
        Scenario: the team card on the My Teams page should show the correct member count.
Given I am a member of a team with multiple members
When I visit the My Teams page
Then I should see the correct number of team members on my membership
"""
teams = self.create_teams(self.topic, 1)
self.create_memberships(4, teams[0]['id'])
self.my_teams_page.visit()
self.assertEqual(self.my_teams_page.team_memberships[0], '4 / 10 Members')
@attr(shard=5)
@ddt.ddt
class BrowseTopicsTest(TeamsTabBase):
"""
Tests for the Browse tab of the Teams page.
"""
def setUp(self):
super(BrowseTopicsTest, self).setUp()
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
@ddt.data(('name', False), ('team_count', True))
@ddt.unpack
def test_sort_topics(self, sort_order, reverse):
"""
Scenario: the user should be able to sort the list of topics by name or team count
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
When I choose a sort order
Then I should see the paginated list of topics in that order
"""
topics = self.create_topics(TOPICS_PER_PAGE + 1)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
for i, topic in enumerate(random.sample(topics, len(topics))):
self.create_teams(topic, i)
topic['team_count'] = i
self.topics_page.visit()
self.topics_page.sort_topics_by(sort_order)
topic_names = self.topics_page.topic_names
self.assertEqual(len(topic_names), TOPICS_PER_PAGE)
self.assertEqual(
topic_names,
[t['name'] for t in sorted(topics, key=lambda t: t[sort_order], reverse=reverse)][:TOPICS_PER_PAGE]
)
def test_sort_topics_update(self):
"""
Scenario: the list of topics should remain sorted after updates
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics and choose a sort order
Then I should see the paginated list of topics in that order
When I create a team in one of those topics
And I return to the topics list
Then I should see the topics in the correct sorted order
"""
topics = self.create_topics(3)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
self.topics_page.visit()
self.topics_page.sort_topics_by('team_count')
topic_name = self.topics_page.topic_names[-1]
topic = [t for t in topics if t['name'] == topic_name][0]
self.topics_page.browse_teams_for_topic(topic_name)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
browse_teams_page.wait_for_page()
browse_teams_page.click_create_team_link()
create_team_page = TeamManagementPage(self.browser, self.course_id, topic)
create_team_page.create_team()
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
team_page.click_all_topics()
self.topics_page.wait_for_page()
self.topics_page.wait_for_ajax()
self.assertEqual(topic_name, self.topics_page.topic_names[0])
def test_list_topics(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(2)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 2)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-2 out of 2 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_topic_pagination(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab, paginated 12 per page
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see only the first 12 topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(20)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 20 total'))
self.assertTrue(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertTrue(self.topics_page.is_next_page_button_enabled())
def test_go_to_numbered_page(self):
"""
Scenario: topics should be able to be navigated by page number
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter a valid page number in the page number input
Then I should see that page of topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(25)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_go_to_invalid_page(self):
"""
Scenario: browsing topics should not respond to invalid page numbers
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter an invalid page number in the page number input
Then I should stay on the current page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(self.topics_page.get_current_page_number(), 1)
def test_page_navigation_buttons(self):
"""
        Scenario: topics should be navigable using the previous and next page buttons
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
When I press the next page button
Then I should move to the next page
When I press the previous page button
Then I should move to the previous page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.press_next_page_button()
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 13-13 out of 13 total'))
self.topics_page.press_previous_page_button()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 13 total'))
def test_topic_pagination_one_page(self):
"""
        Scenario: when there are fewer topics than the page size (12), browsing
        topics should show all of them on one page
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
        And I should see the correct number of topic cards
And I should see the correct page header
And I should not see a pagination footer
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(10)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 10)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
def test_topic_description_truncation(self):
"""
Scenario: excessively long topic descriptions should be truncated so
as to fit within a topic card.
Given I am enrolled in a course with a team configuration and a topic
with a long description
When I visit the Teams page
And I browse topics
Then I should see a truncated topic description
"""
initial_description = "A" + " really" * 50 + " long description"
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [{"name": "", "id": "", "description": initial_description}]}
)
self.topics_page.visit()
truncated_description = self.topics_page.topic_descriptions[0]
self.assertLess(len(truncated_description), len(initial_description))
self.assertTrue(truncated_description.endswith('...'))
self.assertIn(truncated_description.split('...')[0], initial_description)
def test_go_to_teams_list(self):
"""
Scenario: Clicking on a Topic Card should take you to the
teams list for that Topic.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
And I browse topics
And I click on the arrow link to view teams for the first topic
Then I should be on the browse teams page
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
self.topics_page.visit()
self.topics_page.browse_teams_for_topic('Example Topic')
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
browse_teams_page.wait_for_page()
self.assertEqual(browse_teams_page.header_name, 'Example Topic')
self.assertEqual(browse_teams_page.header_description, 'Description')
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse topics page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the browse topics page
Then my browser should post a page viewed event
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'browse',
'topic_id': None,
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.topics_page.visit()
@attr(shard=5)
@ddt.ddt
class BrowseTeamsWithinTopicTest(TeamsTabBase):
"""
Tests for browsing Teams within a Topic on the Teams page.
"""
TEAMS_PAGE_SIZE = 10
def setUp(self):
super(BrowseTeamsWithinTopicTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.max_team_size = 10
self.set_team_configuration({
'course_id': self.course_id,
'max_team_size': self.max_team_size,
'topics': [self.topic]
})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
def teams_with_default_sort_order(self, teams):
"""Return a list of teams sorted according to the default ordering
(last_activity_at, with a secondary sort by open slots).
"""
return sorted(
sorted(teams, key=lambda t: len(t['membership']), reverse=True),
key=lambda t: parse(t['last_activity_at']).replace(microsecond=0),
reverse=True
)
def verify_page_header(self):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(self.browse_teams_page.header_name, self.topic['name'])
self.assertEqual(self.browse_teams_page.header_description, self.topic['description'])
def verify_search_header(self, search_results_page, search_query):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(search_results_page.header_name, 'Team Search')
self.assertEqual(
search_results_page.header_description,
'Showing results for "{search_query}"'.format(search_query=search_query)
)
def verify_on_page(self, teams_page, page_num, total_teams, pagination_header_text, footer_visible):
"""
Verify that we are on the correct team list page.
Arguments:
teams_page (BaseTeamsPage): The teams page object that should be the current page.
page_num (int): The one-indexed page number that we expect to be on
total_teams (list): An unsorted list of all the teams for the
current topic
pagination_header_text (str): Text we expect to see in the
pagination header.
footer_visible (bool): Whether we expect to see the pagination
footer controls.
"""
sorted_teams = self.teams_with_default_sort_order(total_teams)
self.assertTrue(teams_page.get_pagination_header_text().startswith(pagination_header_text))
self.verify_teams(
teams_page,
sorted_teams[(page_num - 1) * self.TEAMS_PAGE_SIZE:page_num * self.TEAMS_PAGE_SIZE]
)
self.assertEqual(
teams_page.pagination_controls_visible(),
footer_visible,
            msg='Expected paging footer to be ' + ('visible' if footer_visible else 'invisible')
)
@ddt.data(
('open_slots', 'last_activity_at', True),
('last_activity_at', 'open_slots', True)
)
@ddt.unpack
def test_sort_teams(self, sort_order, secondary_sort_order, reverse):
"""
Scenario: the user should be able to sort the list of teams by open slots or last activity
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic
When I choose a sort order
Then I should see the paginated list of teams in that order
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
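        # Give each team a different number of members (and therefore of open
        # slots) so the sort order being tested is unambiguous.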
for i, team in enumerate(random.sample(teams, len(teams))):
for _ in range(i):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
self.create_membership(user_info['username'], team['id'])
team['open_slots'] = self.max_team_size - i
# Re-authenticate as staff after creating users
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=True
).visit()
self.browse_teams_page.visit()
self.browse_teams_page.sort_teams_by(sort_order)
team_names = self.browse_teams_page.team_names
self.assertEqual(len(team_names), self.TEAMS_PAGE_SIZE)
sorted_teams = [
team['name']
for team in sorted(
sorted(teams, key=lambda t: t[secondary_sort_order], reverse=reverse),
key=lambda t: t[sort_order],
reverse=reverse
)
][:self.TEAMS_PAGE_SIZE]
self.assertEqual(team_names, sorted_teams)
def test_default_sort_order(self):
"""
Scenario: the list of teams should be sorted by last activity by default
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic, sorted by last activity
"""
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
self.browse_teams_page.visit()
self.assertEqual(self.browse_teams_page.sort_order, 'last activity')
def test_no_teams(self):
"""
Scenario: Visiting a topic with no teams should not display any teams.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing no teams
And I should see no teams
And I should see a button to add a team
And I should not see a pagination footer
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.assertEqual(len(self.browse_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_one_page(self):
"""
        Scenario: Visiting a topic with fewer teams than the page size should
        display all those teams on one page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should see a button to add a team
And I should not see a pagination footer
"""
teams = self.teams_with_default_sort_order(
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE, time_between_creation=1)
)
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
self.verify_teams(self.browse_teams_page, teams)
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_navigation_buttons(self):
"""
Scenario: The user should be able to page through a topic's team list
using navigation buttons when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I click on the next page button
Then I should see that I am on the second page of results
And when I click on the previous page button
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
self.browse_teams_page.press_next_page_button()
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-11 out of 11 total', True)
self.browse_teams_page.press_previous_page_button()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
def test_teams_page_input(self):
"""
Scenario: The user should be able to page through a topic's team list
using the page input when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I input the second page
Then I should see that I am on the second page of results
When I input the first page
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 10, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
self.browse_teams_page.go_to_page(2)
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-20 out of 20 total', True)
self.browse_teams_page.go_to_page(1)
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
def test_browse_team_topics(self):
"""
Scenario: User should be able to navigate to "browse all teams" and "search team description" links.
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic
Then I should see the correct page header
And I should see the link to "browse teams in other topics"
        When I navigate to that link
Then I should see the topic browse page
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.browse_teams_page.click_browse_all_teams_link()
self.topics_page.wait_for_page()
def test_search(self):
"""
Scenario: User should be able to search for a team
Given I am enrolled in a course with teams enabled
When I visit the Teams page for that topic
And I search for 'banana'
Then I should see the search result page
And the search header should be shown
And 0 results should be shown
And my browser should fire a page viewed event for the search page
And a searched event should have been fired
"""
# Note: all searches will return 0 results with the mock search server
# used by Bok Choy.
search_text = 'banana'
self.create_teams(self.topic, 5)
self.browse_teams_page.visit()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'search-teams',
'topic_id': self.topic['id'],
'team_id': None
}
}, {
'event_type': 'edx.team.searched',
'event': {
'search_text': search_text,
'topic_id': self.topic['id'],
'number_of_results': 0
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events, in_order=False):
search_results_page = self.browse_teams_page.search(search_text)
self.verify_search_header(search_results_page, search_text)
self.assertTrue(search_results_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
Then my browser should post a page viewed event for the teams page
"""
self.create_teams(self.topic, 5)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-topic',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.browse_teams_page.visit()
def test_team_name_xss(self):
"""
Scenario: Team names should be HTML-escaped on the teams page
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic, with a team name containing JS code
Then I should not see any alerts
"""
self.post_team_data({
'course_id': self.course_id,
'topic_id': self.topic['id'],
'name': '<script>alert("XSS")</script>',
'description': 'Description',
'language': 'aa',
'country': 'AF'
})
with self.assertRaises(TimeoutException):
self.browser.get(self.browse_teams_page.url)
alert = get_modal_alert(self.browser)
alert.accept()
@attr(shard=5)
class TeamFormActions(TeamsTabBase):
"""
    Base class for the create, edit, and delete team tests.
"""
TEAM_DESCRIPTION = 'The Avengers are a fictional team of superheroes.'
topic = {'name': 'Example Topic', 'id': 'example_topic', 'description': 'Description'}
TEAMS_NAME = 'Avengers'
def setUp(self):
super(TeamFormActions, self).setUp()
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
def verify_page_header(self, title, description, breadcrumbs):
"""
        Verify that the page header shows the expected
        title, description, and breadcrumbs.
"""
self.assertEqual(self.team_management_page.header_page_name, title)
self.assertEqual(self.team_management_page.header_page_description, description)
self.assertEqual(self.team_management_page.header_page_breadcrumbs, breadcrumbs)
def verify_and_navigate_to_create_team_page(self):
"""Navigates to the create team page and verifies."""
self.browse_teams_page.click_create_team_link()
self.verify_page_header(
title='Create a New Team',
description='Create a new team if you can\'t find an existing team to join, '
'or if you would like to learn with friends you know.',
breadcrumbs='All Topics {topic_name}'.format(topic_name=self.topic['name'])
)
def verify_and_navigate_to_edit_team_page(self):
"""Navigates to the edit team page and verifies."""
# pylint: disable=no-member
self.assertEqual(self.team_page.team_name, self.team['name'])
self.assertTrue(self.team_page.edit_team_button_present)
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
# Edit page header.
self.verify_page_header(
title='Edit Team',
description='If you make significant changes, make sure you notify '
'members of the team before making these changes.',
breadcrumbs='All Topics {topic_name} {team_name}'.format(
topic_name=self.topic['name'],
team_name=self.team['name']
)
)
def verify_team_info(self, name, description, location, language):
"""Verify the team information on team page."""
# pylint: disable=no-member
self.assertEqual(self.team_page.team_name, name)
self.assertEqual(self.team_page.team_description, description)
self.assertEqual(self.team_page.team_location, location)
self.assertEqual(self.team_page.team_language, language)
def fill_create_or_edit_form(self):
"""Fill the create/edit team form fields with appropriate values."""
self.team_management_page.value_for_text_field(
field_id='name',
value=self.TEAMS_NAME,
press_enter=False
)
self.team_management_page.set_value_for_textarea_field(
field_id='description',
value=self.TEAM_DESCRIPTION
)
self.team_management_page.value_for_dropdown_field(field_id='language', value='English')
self.team_management_page.value_for_dropdown_field(field_id='country', value='Pakistan')
def verify_all_fields_exist(self):
"""
Verify the fields for create/edit page.
"""
self.assertEqual(
self.team_management_page.message_for_field('name'),
'A name that identifies your team (maximum 255 characters).'
)
self.assertEqual(
self.team_management_page.message_for_textarea_field('description'),
'A short description of the team to help other learners understand '
'the goals or direction of the team (maximum 300 characters).'
)
self.assertEqual(
self.team_management_page.message_for_field('country'),
'The country that team members primarily identify with.'
)
self.assertEqual(
self.team_management_page.message_for_field('language'),
'The language that team members primarily use to communicate with each other.'
)
@ddt.ddt
class CreateTeamTest(TeamFormActions):
"""
Tests for creating a new Team within a Topic on the Teams page.
"""
def setUp(self):
super(CreateTeamTest, self).setUp()
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.browse_teams_page.visit()
def test_user_can_see_create_team_page(self):
"""
Scenario: The user should be able to see the create team page via teams list page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
        Then I should see the Create Team page link at the bottom
And When I click create team link
Then I should see the create team page.
And I should see the create team header
And I should also see the help messages for fields.
"""
self.verify_and_navigate_to_create_team_page()
self.verify_all_fields_exist()
def test_user_can_see_error_message_for_missing_data(self):
"""
        Scenario: The user should see an error message when required fields are missing.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
And When I click create team button without filling required fields
Then I should see the error message and highlighted fields.
"""
self.verify_and_navigate_to_create_team_page()
# `submit_form` clicks on a button, but that button doesn't always
# have the click event handler registered on it in time. That's why
# this test is flaky. Unfortunately, I don't know of a straightforward
# way to write something that waits for that event handler to be bound
# to the button element. So I used time.sleep as well, even though
# the bok choy docs explicitly ask us not to:
# http://bok-choy.readthedocs.io/en/latest/guidelines.html
# Sorry! For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
self.team_management_page.submit_form()
self.team_management_page.wait_for(
lambda: self.team_management_page.validation_message_text,
"Validation message text never loaded."
)
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
self.assertTrue(self.team_management_page.error_for_field(field_id='description'))
def test_user_can_see_error_message_for_incorrect_data(self):
"""
        Scenario: The user should see an error message when a field exceeds its maximum length.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
        When I enter more than 255 characters in the name field
And I click Create button
Then I should see the error message for exceeding length.
"""
self.verify_and_navigate_to_create_team_page()
# Fill the name field with >255 characters to see validation message.
self.team_management_page.value_for_text_field(
field_id='name',
value='EdX is a massive open online course (MOOC) provider and online learning platform. '
'It hosts online university-level courses in a wide range of disciplines to a worldwide '
'audience, some at no charge. It also conducts research into learning based on how '
            'people use its platform. EdX was created for students and institutions that seek to '
            'transform themselves through cutting-edge technologies, innovative pedagogy, and '
            'rigorous courses. More than 70 schools, nonprofits, corporations, and international '
            'organizations offer or plan to offer courses on the edX website. As of 22 October 2014, '
            'edX has more than 4 million users taking more than 500 courses online.',
press_enter=False
)
self.team_management_page.submit_form()
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
def test_user_can_create_new_team_successfully(self):
"""
Scenario: The user should be able to create new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I fill all the fields present with appropriate data
And I click Create button
Then I expect analytics events to be emitted
And I should see the page for my team
        And I should see the message that says "You are a member of this team"
And the new team should be added to the list of teams within the topic
And the number of teams should be updated on the topic card
And if I switch to "My Team", the newly created team is displayed
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.browse_teams_page.visit()
self.verify_and_navigate_to_create_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.created'
},
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'added_on_create',
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_management_page.submit_form()
# Verify that the page is shown for the new team
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
self.assertEqual(team_page.team_name, self.TEAMS_NAME)
self.assertEqual(team_page.team_description, self.TEAM_DESCRIPTION)
self.assertEqual(team_page.team_user_membership_text, 'You are a member of this team.')
# Verify the new team was added to the topic list
self.teams_page.click_specific_topic("Example Topic")
self.teams_page.verify_topic_team_count(1)
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(1)
# Verify that if one switches to "My Team" without reloading the page, the newly created team is shown.
self.verify_my_team_count(1)
def test_user_can_cancel_the_team_creation(self):
"""
Scenario: The user should be able to cancel the creation of new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I click Cancel button
Then I should see teams list page without any new team.
And if I switch to "My Team", it shows no teams
"""
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.verify_and_navigate_to_create_team_page()
# We add a sleep here to allow time for the click event handler to bind
# to the cancel button. Using time.sleep in bok-choy tests is,
# generally, an anti-pattern. So don't copy this :).
# For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
self.team_management_page.cancel_team()
self.browse_teams_page.wait_for_page()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(0)
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the create team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the create team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'new-team',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_create_team_page()
@ddt.ddt
class DeleteTeamTest(TeamFormActions):
"""
Tests for deleting teams.
"""
def setUp(self):
super(DeleteTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
        # Need to have a membership to confirm it gets deleted as well.
self.create_membership(self.user_info['username'], self.team['id'])
self.team_page.visit()
def test_cancel_delete(self):
"""
Scenario: The user should be able to cancel the Delete Team dialog
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I cancel the prompt
And I refresh the page
Then I should still see the team
"""
self.delete_team(cancel=True)
self.team_management_page.wait_for_page()
self.browser.refresh()
self.team_management_page.wait_for_page()
self.assertEqual(
' '.join(('All Topics', self.topic['name'], self.team['name'])),
self.team_management_page.header_page_breadcrumbs
)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_delete_team(self, role):
"""
Scenario: The user should be able to see and navigate to the delete team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I confirm the prompt
Then I should see the browse teams page
And the team should not be present
"""
# If role is None, remain logged in as global staff
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.delete_team(require_notification=False)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
browse_teams_page.wait_for_page()
self.assertNotIn(self.team['name'], browse_teams_page.team_names)
def delete_team(self, **kwargs):
"""
Delete a team. Passes `kwargs` to `confirm_prompt`.
Expects edx.team.deleted event to be emitted, with correct course_id.
Also expects edx.team.learner_removed event to be emitted for the
membership that is removed as a part of the delete operation.
"""
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.team_management_page.delete_team_button.click()
if 'cancel' in kwargs and kwargs['cancel'] is True:
confirm_prompt(self.team_management_page, **kwargs)
else:
expected_events = [
{
'event_type': 'edx.team.deleted',
'event': {
'team_id': self.team['id']
}
},
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'team_deleted',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
confirm_prompt(self.team_management_page, **kwargs)
def test_delete_team_updates_topics(self):
"""
Scenario: Deleting a team should update the team count on the topics page
Given I am staff user for a course with a team
And I delete a team
When I navigate to the browse topics page
        Then the team count for the deleted team's topic should be updated
"""
self.delete_team(require_notification=False)
BrowseTeamsPage(self.browser, self.course_id, self.topic).click_all_topics()
topics_page = BrowseTopicsPage(self.browser, self.course_id)
topics_page.wait_for_page()
self.teams_page.verify_topic_team_count(0)
@ddt.ddt
class EditTeamTest(TeamFormActions):
"""
Tests for editing the team.
"""
def setUp(self):
super(EditTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
self.team_page.visit()
def test_staff_can_navigate_to_edit_team_page(self):
"""
Scenario: The user should be able to see and navigate to the edit team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And I should see the edit team header
And I should also see the help messages for fields
"""
self.verify_and_navigate_to_edit_team_page()
self.verify_all_fields_exist()
def test_staff_can_edit_team_successfully(self):
"""
Scenario: The staff should be able to edit team successfully.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And an analytics event should be fired
When I edit all the fields with appropriate data
And I click Update button
Then I should see the page for my team with updated data
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'country',
'old': 'AF',
'new': 'PK',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'name',
'old': self.team['name'],
'new': self.TEAMS_NAME,
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'language',
'old': 'aa',
'new': 'en',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'description',
'old': self.team['description'],
'new': self.TEAM_DESCRIPTION,
'truncated': [],
}
},
]
with self.assert_events_match_during(
event_filter=self.only_team_events,
expected_events=expected_events,
):
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_staff_can_cancel_the_team_edit(self):
"""
Scenario: The user should be able to cancel the editing of team.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
Then I should see the Edit Team header
When I click Cancel button
        Then I should see the team page without changes.
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.cancel_team()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
def test_student_cannot_see_edit_button(self):
"""
Scenario: The student should not see the edit team button.
Given I am student for a course with a team
When I visit the Team profile page
Then I should not see the Edit Team button
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page.visit()
self.assertFalse(self.team_page.edit_team_button_present)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged_user_can_edit_team(self, role):
"""
Scenario: The user with specified role should see the edit team button.
        Given I am a user with a privileged role for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
"""
kwargs = {
'course_id': self.course_id,
'staff': False
}
if role is not None:
kwargs['roles'] = role
AutoAuthPage(self.browser, **kwargs).visit()
self.team_page.visit()
self.teams_page.wait_for_page()
self.assertTrue(self.team_page.edit_team_button_present)
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_page_viewed_event(self):
"""
Scenario: Visiting the edit team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the edit team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'edit-team',
'topic_id': self.topic['id'],
'team_id': self.team['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_edit_team_page()
@ddt.ddt
class EditMembershipTest(TeamFormActions):
"""
    Tests for administering memberships from the team membership page.
"""
def setUp(self):
super(EditMembershipTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
self.team = self.create_teams(self.topic, num_teams=1)[0]
        # Make sure a user exists on this team so we can edit the membership.
self.create_membership(self.user_info['username'], self.team['id'])
self.edit_membership_page = EditMembershipPage(self.browser, self.course_id, self.team)
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
def edit_membership_helper(self, role, cancel=False):
"""
Helper for common functionality in edit membership tests.
        Makes all relevant assertions about the membership being removed (or not),
        including verifying that edx.team.learner_removed events are emitted.
"""
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.assertTrue(
self.team_management_page.membership_button_present
)
self.team_management_page.click_membership_button()
self.edit_membership_page.wait_for_page()
self.edit_membership_page.click_first_remove()
if cancel:
self.edit_membership_page.cancel_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 1)
else:
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'removed_by_admin',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
self.edit_membership_page.confirm_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 0)
self.edit_membership_page.wait_for_page()
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_remove_membership(self, role):
"""
Scenario: The user should be able to remove a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and confirm the dialog
Then my membership should be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=False)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_cancel_remove_membership(self, role):
"""
        Scenario: The user should be able to cancel the removal of a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and cancel the dialog
Then my membership should not be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=True)
@attr(shard=5)
@ddt.ddt
class TeamPageTest(TeamsTabBase):
"""Tests for viewing a specific team"""
SEND_INVITE_TEXT = 'Send this link to friends so that they can join too.'
def setUp(self):
super(TeamPageTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
def _set_team_configuration_and_membership(
self,
max_team_size=10,
membership_team_index=0,
visit_team_index=0,
create_membership=True,
another_user=False):
"""
Set team configuration.
Arguments:
max_team_size (int): number of users a team can have
membership_team_index (int): index of team user will join
visit_team_index (int): index of team user will visit
create_membership (bool): whether to create membership or not
            another_user (bool): whether to log in as a different user before visiting the team
"""
#pylint: disable=attribute-defined-outside-init
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': max_team_size, 'topics': [self.topic]}
)
self.teams = self.create_teams(self.topic, 2)
if create_membership:
self.create_membership(self.user_info['username'], self.teams[membership_team_index]['id'])
if another_user:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page = TeamPage(self.browser, self.course_id, self.teams[visit_team_index])
def setup_thread(self):
"""
Create and return a thread for this test's discussion topic.
"""
thread = Thread(
id="test_thread_{}".format(uuid4().hex),
commentable_id=self.teams[0]['discussion_topic_id'],
body="Dummy text body."
)
thread_fixture = MultipleThreadFixture([thread])
thread_fixture.push()
return thread
def setup_discussion_user(self, role=None, staff=False):
"""Set this test's user to have the given role in its
discussions. Role is one of 'Community TA', 'Moderator',
'Administrator', or 'Student'.
"""
kwargs = {
'course_id': self.course_id,
'staff': staff
}
if role is not None:
kwargs['roles'] = role
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, **kwargs).visit().user_info
def verify_teams_discussion_permissions(self, should_have_permission):
"""Verify that the teams discussion component is in the correct state
for the test user. If `should_have_permission` is True, assert that
the user can see controls for posting replies, voting, editing, and
deleting. Otherwise, assert that those controls are hidden.
"""
thread = self.setup_thread()
self.team_page.visit()
self.assertEqual(self.team_page.discussion_id, self.teams[0]['discussion_topic_id'])
discussion = self.team_page.discussion_page
discussion.wait_for_page()
self.assertTrue(discussion.is_discussion_expanded())
self.assertEqual(discussion.get_num_displayed_threads(), 1)
self.assertTrue(discussion.has_thread(thread['id']))
assertion = self.assertTrue if should_have_permission else self.assertFalse
assertion(discussion.q(css='.post-header-actions').present)
assertion(discussion.q(css='.add-response').present)
def test_discussion_on_my_team_page(self):
"""
Scenario: Team Page renders a discussion for a team to which I belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the existing thread
And I should see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership()
self.verify_teams_discussion_permissions(True)
@ddt.data(True, False)
def test_discussion_on_other_team_page(self, is_staff):
"""
Scenario: Team Page renders a team discussion for a team to which I do
not belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the team's thread
And I should not see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(staff=is_staff)
self.verify_teams_discussion_permissions(False)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged(self, role):
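        """
        Scenario: Discussion-privileged users should be able to moderate a
        team's discussion even when they are not members of that team.
        Given I am a user with a privileged discussion role in a course with
        a team belonging to a topic
        When the team has a discussion with a thread
        And I visit the Team page for that team
        Then I should see controls to change the state of the discussion
        """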
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(role=role)
self.verify_teams_discussion_permissions(True)
def assert_team_details(self, num_members, is_member=True, max_size=10):
"""
        Verify that the user sees all the information on the team detail page
        that is appropriate for their membership status.
        Arguments:
            num_members (int): number of users in the team
            is_member (bool): True if the requesting user is a member (default True)
            max_size (int): maximum number of users the team can have
"""
self.assertEqual(
self.team_page.team_capacity_text,
self.team_page.format_capacity_text(num_members, max_size)
)
self.assertEqual(self.team_page.team_location, 'Afghanistan')
self.assertEqual(self.team_page.team_language, 'Afar')
self.assertEqual(self.team_page.team_members, num_members)
if num_members > 0:
self.assertTrue(self.team_page.team_members_present)
else:
self.assertFalse(self.team_page.team_members_present)
if is_member:
self.assertEqual(self.team_page.team_user_membership_text, 'You are a member of this team.')
self.assertTrue(self.team_page.team_leave_link_present)
self.assertTrue(self.team_page.new_post_button_present)
else:
self.assertEqual(self.team_page.team_user_membership_text, '')
self.assertFalse(self.team_page.team_leave_link_present)
self.assertFalse(self.team_page.new_post_button_present)
def test_team_member_can_see_full_team_details(self):
"""
Scenario: Team member can see full info for team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see the full team detail
And I should see the team members
And I should see my team membership text
And I should see the language & country
And I should see the Leave Team and Invite Team
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assert_team_details(
num_members=1,
)
def test_other_users_can_see_limited_team_details(self):
"""
Scenario: Users who are not member of this team can only see limited info for this team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When I visit the Team page for that team
Then I should not see full team detail
And I should see the team members
And I should not see my team membership text
And I should not see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership(create_membership=False)
self.team_page.visit()
self.assert_team_details(is_member=False, num_members=0)
def test_user_can_navigate_to_members_profile_page(self):
"""
Scenario: User can navigate to profile page via team member profile image.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see profile images for the team members
When I click on the first profile image
Then I should be taken to the user's profile page
And I should see the username on profile page
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
learner_name = self.team_page.first_member_username
self.team_page.click_first_profile_image()
learner_profile_page = LearnerProfilePage(self.browser, learner_name)
learner_profile_page.wait_for_page()
learner_profile_page.wait_for_field('username')
self.assertTrue(learner_profile_page.field_is_visible('username'))
def test_join_team(self):
"""
        Scenario: User can join a team if they are not already a member.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I visit the Team page for that team
Then I should see Join Team button
And I should not see New Post button
When I click on Join Team button
Then there should be no Join Team button and no message
And an analytics event should be emitted
And I should see the updated information under Team Details
And I should see New Post button
And if I switch to "My Team", the team I have joined is displayed
"""
self._set_team_configuration_and_membership(create_membership=False)
teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
teams_page.visit()
teams_page.view_first_team()
self.assertTrue(self.team_page.join_team_button_present)
expected_events = [
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'joined_from_team_view'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_page.click_join_team_button()
self.assertFalse(self.team_page.join_team_button_present)
self.assertFalse(self.team_page.join_team_message_present)
self.assert_team_details(num_members=1, is_member=True)
# Verify that if one switches to "My Team" without reloading the page, the newly joined team is shown.
self.teams_page.click_all_topics()
self.verify_my_team_count(1)
def test_already_member_message(self):
"""
        Scenario: User should see the `You already belong to another team.`
        message if they are a member of another team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am already a member of a team
And I visit a team other than mine
        Then I should see the `You already belong to another team.` message
"""
self._set_team_configuration_and_membership(membership_team_index=0, visit_team_index=1)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'You already belong to another team.')
self.assert_team_details(num_members=0, is_member=False)
def test_team_full_message(self):
"""
Scenario: User should see `Team is full` message when team is full.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And team has no space left
And I am not a member of any team
And I visit the team
Then I should see `Team is full` message
"""
self._set_team_configuration_and_membership(
create_membership=True,
max_team_size=1,
membership_team_index=0,
visit_team_index=0,
another_user=True
)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'This team is full.')
self.assert_team_details(num_members=1, is_member=False, max_size=1)
def test_leave_team(self):
"""
Scenario: User can leave a team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am a member of team
And I visit the team
And I should not see Join Team button
And I should see New Post button
Then I should see Leave Team link
When I click on Leave Team link
Then user should be removed from team
And an analytics event should be emitted
And I should see Join Team button
And I should not see New Post button
And if I switch to "My Team", the team I have left is not displayed
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assertFalse(self.team_page.join_team_button_present)
self.assert_team_details(num_members=1)
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'remove_method': 'self_removal'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
# I think we're seeing the same problem that we're seeing in
# CreateTeamTest.test_user_can_see_error_message_for_missing_data.
# We click on the "leave team" link after it's loaded, but before
# its JavaScript event handler is added. Adding this sleep gives
# enough time for that event handler to bind to the link. Sorry!
# For the story to address this anti-pattern, see TNL-5820
time.sleep(0.5)
self.team_page.click_leave_team_link()
self.assert_team_details(num_members=0, is_member=False)
self.assertTrue(self.team_page.join_team_button_present)
# Verify that if one switches to "My Team" without reloading the page, the old team no longer shows.
self.teams_page.click_all_topics()
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the team profile page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the team profile page
Then my browser should post a page viewed event
"""
self._set_team_configuration_and_membership()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-team',
'topic_id': self.topic['id'],
'team_id': self.teams[0]['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.team_page.visit()
| itsjeyd/edx-platform | common/test/acceptance/tests/lms/test_teams.py | Python | agpl-3.0 | 84,795 |
#!/usr/bin/python
import os
import sys
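# Print an indented tree of the directory passed as the first command-line
# argument, e.g. ./directory-traversal.py /some/path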
pre = ''
for (path,dirs,files) in os.walk(sys.argv[1]) :
depth_from_root = len(path.split('/'))
#print 'DEPTH FROM ROOT %s' %depth_from_root
    print '-'*(depth_from_root*4 + 8) + ' [ ' + path.split('/')[-1] + ' ]'
    for file in files:
        print '-'*(depth_from_root*4 + 12) + ' ' + file
| arielravi/python-SPSE | src/directory-traversal.py | Python | agpl-3.0 | 329 |
import unittest
from coala_quickstart.info_extractors import Utilities
class UtilitiesTest(unittest.TestCase):
def setUp(self):
self.simple_dict = {
'key1': 'value1',
'key2': 'value2'
}
self.nested_dict = {
'key1': {
'key1.1': 'value1.1'
}
}
self.nested_dict_with_list = {
'key1': {
'key1.1': [
{
'key_a': 'value_a'
},
{
'key_b': [
{
'key_b_1': 'value_b_1'
}]
}]
}
}
self.nested_list_with_dict = [
{
'key1': 'value1',
'key2': 'value2'
},
{
'key3': 'value3',
'key4': 'value4'
}
]
self.dict_with_repeated_structure = {
'key1': {
'key1.1': 'value1.1'
},
'key2': {
'key1.1': 'value1.1',
'key2.1': 'value2.1'
}
}
def test_search_object_recursively(self):
uut = Utilities.search_object_recursively
def assert_value_and_path(search_obj,
search_key,
search_value,
expected_results,
expected_paths):
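            # Perform the search and check that every expected (object, path)
            # pair appears in the returned results.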
search_results = uut(search_obj, search_key, search_value)
for obj, path in zip(expected_results, expected_paths):
expected_result = {
'object': obj,
'path': path
}
self.assertIn(expected_result, search_results)
assert_value_and_path(
self.simple_dict, 'key1', None, ['value1'], [('key1',)])
assert_value_and_path(self.simple_dict,
'key1',
'value1',
[{
'key1': 'value1',
'key2': 'value2'
}],
[('key1',)])
assert_value_and_path(self.nested_dict,
'key1.1',
None,
['value1.1'],
[('key1', 'key1.1')])
assert_value_and_path(self.nested_dict,
'key1.1',
'value1.1',
[{
'key1.1': 'value1.1'
}],
[('key1', 'key1.1')])
assert_value_and_path(self.nested_dict_with_list,
'key_b_1',
None,
['value_b_1'],
[('key1', 'key1.1', 1, 'key_b', 0, 'key_b_1')])
assert_value_and_path(self.nested_dict_with_list,
'key_b_1',
'value_b_1',
[{
'key_b_1': 'value_b_1'
}],
[('key1', 'key1.1', 1, 'key_b', 0, 'key_b_1')])
assert_value_and_path(self.nested_list_with_dict,
'key3',
None,
['value3'],
[(1, 'key3')])
assert_value_and_path(self.nested_list_with_dict,
'key3',
'value3',
[{
'key3': 'value3',
'key4': 'value4'
}],
[(1, 'key3')])
assert_value_and_path(self.dict_with_repeated_structure,
'key1.1',
None,
['value1.1', 'value1.1'],
[('key1', 'key1.1'), ('key2', 'key1.1')])
assert_value_and_path(self.dict_with_repeated_structure,
'key1.1',
'value1.1',
[
{
'key1.1': 'value1.1',
'key2.1': 'value2.1'
},
{
'key1.1': 'value1.1',
}
],
[('key2', 'key1.1'), ('key1', 'key1.1')])
| MalkmusT/coala-quickstart | tests/info_extractors/UtilitiesTest.py | Python | agpl-3.0 | 4,899 |
# -*- Mode: Python; py-indent-offset: 4 -*-
# coding=utf-8
# vim: tabstop=4 shiftwidth=4 expandtab
import sys
import unittest
import tempfile
import shutil
import os
import locale
import subprocess
from gi.repository import GObject
import gobject
from gi.repository import GIMarshallingTests
from compathelper import _bytes
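# The UTF-8 constant and CHAR_255 have to be spelled differently on Python 2
# and Python 3: on Python 2 the constant is a UTF-8 encoded byte string (with
# a separate unicode version) and CHAR_255 is a one-character str, while on
# Python 3 the constant is a text string and CHAR_255 is a bytes object.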
if sys.version_info < (3, 0):
CONSTANT_UTF8 = "const \xe2\x99\xa5 utf8"
PY2_UNICODE_UTF8 = unicode(CONSTANT_UTF8, 'UTF-8')
    CHAR_255 = '\xff'
else:
CONSTANT_UTF8 = "const ♥ utf8"
    CHAR_255 = bytes([255])
CONSTANT_NUMBER = 42
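# Helper classes used throughout the marshalling tests to check duck typing:
# Number only implements __int__/__float__, and Sequence only implements
# __len__/__getitem__, so the bindings must not require real int/float or
# list arguments.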
class Number(object):
def __init__(self, value):
self.value = value
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
class Sequence(object):
def __init__(self, sequence):
self.sequence = sequence
def __len__(self):
return len(self.sequence)
def __getitem__(self, key):
return self.sequence[key]
class TestConstant(unittest.TestCase):
# Blocked by https://bugzilla.gnome.org/show_bug.cgi?id=595773
# def test_constant_utf8(self):
# self.assertEquals(CONSTANT_UTF8, GIMarshallingTests.CONSTANT_UTF8)
def test_constant_number(self):
self.assertEquals(CONSTANT_NUMBER, GIMarshallingTests.CONSTANT_NUMBER)
class TestBoolean(unittest.TestCase):
def test_boolean_return(self):
self.assertEquals(True, GIMarshallingTests.boolean_return_true())
self.assertEquals(False, GIMarshallingTests.boolean_return_false())
def test_boolean_in(self):
GIMarshallingTests.boolean_in_true(True)
GIMarshallingTests.boolean_in_false(False)
GIMarshallingTests.boolean_in_true(1)
GIMarshallingTests.boolean_in_false(0)
def test_boolean_out(self):
self.assertEquals(True, GIMarshallingTests.boolean_out_true())
self.assertEquals(False, GIMarshallingTests.boolean_out_false())
def test_boolean_inout(self):
self.assertEquals(False, GIMarshallingTests.boolean_inout_true_false(True))
self.assertEquals(True, GIMarshallingTests.boolean_inout_false_true(False))
class TestInt8(unittest.TestCase):
MAX = GObject.G_MAXINT8
MIN = GObject.G_MININT8
def test_int8_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.int8_return_max())
self.assertEquals(self.MIN, GIMarshallingTests.int8_return_min())
def test_int8_in(self):
max = Number(self.MAX)
min = Number(self.MIN)
GIMarshallingTests.int8_in_max(max)
GIMarshallingTests.int8_in_min(min)
max.value += 1
min.value -= 1
self.assertRaises(ValueError, GIMarshallingTests.int8_in_max, max)
self.assertRaises(ValueError, GIMarshallingTests.int8_in_min, min)
self.assertRaises(TypeError, GIMarshallingTests.int8_in_max, "self.MAX")
def test_int8_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.int8_out_max())
self.assertEquals(self.MIN, GIMarshallingTests.int8_out_min())
def test_int8_inout(self):
self.assertEquals(self.MIN, GIMarshallingTests.int8_inout_max_min(Number(self.MAX)))
self.assertEquals(self.MAX, GIMarshallingTests.int8_inout_min_max(Number(self.MIN)))
class TestUInt8(unittest.TestCase):
MAX = GObject.G_MAXUINT8
def test_uint8_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint8_return())
def test_uint8_in(self):
number = Number(self.MAX)
GIMarshallingTests.uint8_in(number)
GIMarshallingTests.uint8_in(CHAR_255)
number.value += 1
self.assertRaises(ValueError, GIMarshallingTests.uint8_in, number)
self.assertRaises(ValueError, GIMarshallingTests.uint8_in, Number(-1))
self.assertRaises(TypeError, GIMarshallingTests.uint8_in, "self.MAX")
def test_uint8_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint8_out())
def test_uint8_inout(self):
self.assertEquals(0, GIMarshallingTests.uint8_inout(Number(self.MAX)))
class TestInt16(unittest.TestCase):
MAX = GObject.G_MAXINT16
MIN = GObject.G_MININT16
def test_int16_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.int16_return_max())
self.assertEquals(self.MIN, GIMarshallingTests.int16_return_min())
def test_int16_in(self):
max = Number(self.MAX)
min = Number(self.MIN)
GIMarshallingTests.int16_in_max(max)
GIMarshallingTests.int16_in_min(min)
max.value += 1
min.value -= 1
self.assertRaises(ValueError, GIMarshallingTests.int16_in_max, max)
self.assertRaises(ValueError, GIMarshallingTests.int16_in_min, min)
self.assertRaises(TypeError, GIMarshallingTests.int16_in_max, "self.MAX")
def test_int16_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.int16_out_max())
self.assertEquals(self.MIN, GIMarshallingTests.int16_out_min())
def test_int16_inout(self):
self.assertEquals(self.MIN, GIMarshallingTests.int16_inout_max_min(Number(self.MAX)))
self.assertEquals(self.MAX, GIMarshallingTests.int16_inout_min_max(Number(self.MIN)))
class TestUInt16(unittest.TestCase):
MAX = GObject.G_MAXUINT16
def test_uint16_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint16_return())
def test_uint16_in(self):
number = Number(self.MAX)
GIMarshallingTests.uint16_in(number)
number.value += 1
self.assertRaises(ValueError, GIMarshallingTests.uint16_in, number)
self.assertRaises(ValueError, GIMarshallingTests.uint16_in, Number(-1))
self.assertRaises(TypeError, GIMarshallingTests.uint16_in, "self.MAX")
def test_uint16_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint16_out())
def test_uint16_inout(self):
self.assertEquals(0, GIMarshallingTests.uint16_inout(Number(self.MAX)))
class TestInt32(unittest.TestCase):
MAX = GObject.G_MAXINT32
MIN = GObject.G_MININT32
def test_int32_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.int32_return_max())
self.assertEquals(self.MIN, GIMarshallingTests.int32_return_min())
def test_int32_in(self):
max = Number(self.MAX)
min = Number(self.MIN)
GIMarshallingTests.int32_in_max(max)
GIMarshallingTests.int32_in_min(min)
max.value += 1
min.value -= 1
self.assertRaises(ValueError, GIMarshallingTests.int32_in_max, max)
self.assertRaises(ValueError, GIMarshallingTests.int32_in_min, min)
self.assertRaises(TypeError, GIMarshallingTests.int32_in_max, "self.MAX")
def test_int32_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.int32_out_max())
self.assertEquals(self.MIN, GIMarshallingTests.int32_out_min())
def test_int32_inout(self):
self.assertEquals(self.MIN, GIMarshallingTests.int32_inout_max_min(Number(self.MAX)))
self.assertEquals(self.MAX, GIMarshallingTests.int32_inout_min_max(Number(self.MIN)))
class TestUInt32(unittest.TestCase):
MAX = GObject.G_MAXUINT32
def test_uint32_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint32_return())
def test_uint32_in(self):
number = Number(self.MAX)
GIMarshallingTests.uint32_in(number)
number.value += 1
self.assertRaises(ValueError, GIMarshallingTests.uint32_in, number)
self.assertRaises(ValueError, GIMarshallingTests.uint32_in, Number(-1))
self.assertRaises(TypeError, GIMarshallingTests.uint32_in, "self.MAX")
def test_uint32_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint32_out())
def test_uint32_inout(self):
self.assertEquals(0, GIMarshallingTests.uint32_inout(Number(self.MAX)))
class TestInt64(unittest.TestCase):
MAX = 2 ** 63 - 1
MIN = - (2 ** 63)
def test_int64_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.int64_return_max())
self.assertEquals(self.MIN, GIMarshallingTests.int64_return_min())
def test_int64_in(self):
max = Number(self.MAX)
min = Number(self.MIN)
GIMarshallingTests.int64_in_max(max)
GIMarshallingTests.int64_in_min(min)
max.value += 1
min.value -= 1
self.assertRaises(ValueError, GIMarshallingTests.int64_in_max, max)
self.assertRaises(ValueError, GIMarshallingTests.int64_in_min, min)
self.assertRaises(TypeError, GIMarshallingTests.int64_in_max, "self.MAX")
def test_int64_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.int64_out_max())
self.assertEquals(self.MIN, GIMarshallingTests.int64_out_min())
def test_int64_inout(self):
self.assertEquals(self.MIN, GIMarshallingTests.int64_inout_max_min(Number(self.MAX)))
self.assertEquals(self.MAX, GIMarshallingTests.int64_inout_min_max(Number(self.MIN)))
class TestUInt64(unittest.TestCase):
MAX = 2 ** 64 - 1
def test_uint64_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint64_return())
def test_uint64_in(self):
number = Number(self.MAX)
GIMarshallingTests.uint64_in(number)
number.value += 1
self.assertRaises(ValueError, GIMarshallingTests.uint64_in, number)
self.assertRaises(ValueError, GIMarshallingTests.uint64_in, Number(-1))
self.assertRaises(TypeError, GIMarshallingTests.uint64_in, "self.MAX")
def test_uint64_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint64_out())
def test_uint64_inout(self):
self.assertEquals(0, GIMarshallingTests.uint64_inout(Number(self.MAX)))
class TestShort(unittest.TestCase):
MAX = GObject.constants.G_MAXSHORT
MIN = GObject.constants.G_MINSHORT
def test_short_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.short_return_max())
self.assertEquals(self.MIN, GIMarshallingTests.short_return_min())
def test_short_in(self):
max = Number(self.MAX)
min = Number(self.MIN)
GIMarshallingTests.short_in_max(max)
GIMarshallingTests.short_in_min(min)
max.value += 1
min.value -= 1
self.assertRaises(ValueError, GIMarshallingTests.short_in_max, max)
self.assertRaises(ValueError, GIMarshallingTests.short_in_min, min)
self.assertRaises(TypeError, GIMarshallingTests.short_in_max, "self.MAX")
def test_short_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.short_out_max())
self.assertEquals(self.MIN, GIMarshallingTests.short_out_min())
def test_short_inout(self):
self.assertEquals(self.MIN, GIMarshallingTests.short_inout_max_min(Number(self.MAX)))
self.assertEquals(self.MAX, GIMarshallingTests.short_inout_min_max(Number(self.MIN)))
class TestUShort(unittest.TestCase):
MAX = GObject.constants.G_MAXUSHORT
def test_ushort_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.ushort_return())
def test_ushort_in(self):
number = Number(self.MAX)
GIMarshallingTests.ushort_in(number)
number.value += 1
self.assertRaises(ValueError, GIMarshallingTests.ushort_in, number)
self.assertRaises(ValueError, GIMarshallingTests.ushort_in, Number(-1))
self.assertRaises(TypeError, GIMarshallingTests.ushort_in, "self.MAX")
def test_ushort_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.ushort_out())
def test_ushort_inout(self):
self.assertEquals(0, GIMarshallingTests.ushort_inout(Number(self.MAX)))
class TestInt(unittest.TestCase):
MAX = GObject.constants.G_MAXINT
MIN = GObject.constants.G_MININT
def test_int_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.int_return_max())
self.assertEquals(self.MIN, GIMarshallingTests.int_return_min())
def test_int_in(self):
max = Number(self.MAX)
min = Number(self.MIN)
GIMarshallingTests.int_in_max(max)
GIMarshallingTests.int_in_min(min)
max.value += 1
min.value -= 1
self.assertRaises(ValueError, GIMarshallingTests.int_in_max, max)
self.assertRaises(ValueError, GIMarshallingTests.int_in_min, min)
self.assertRaises(TypeError, GIMarshallingTests.int_in_max, "self.MAX")
def test_int_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.int_out_max())
self.assertEquals(self.MIN, GIMarshallingTests.int_out_min())
def test_int_inout(self):
self.assertEquals(self.MIN, GIMarshallingTests.int_inout_max_min(Number(self.MAX)))
self.assertEquals(self.MAX, GIMarshallingTests.int_inout_min_max(Number(self.MIN)))
self.assertRaises(TypeError, GIMarshallingTests.int_inout_min_max, Number(self.MIN), CONSTANT_NUMBER)
class TestUInt(unittest.TestCase):
MAX = GObject.constants.G_MAXUINT
def test_uint_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint_return())
def test_uint_in(self):
number = Number(self.MAX)
GIMarshallingTests.uint_in(number)
number.value += 1
self.assertRaises(ValueError, GIMarshallingTests.uint_in, number)
self.assertRaises(ValueError, GIMarshallingTests.uint_in, Number(-1))
self.assertRaises(TypeError, GIMarshallingTests.uint_in, "self.MAX")
def test_uint_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.uint_out())
def test_uint_inout(self):
self.assertEquals(0, GIMarshallingTests.uint_inout(Number(self.MAX)))
class TestLong(unittest.TestCase):
MAX = GObject.constants.G_MAXLONG
MIN = GObject.constants.G_MINLONG
def test_long_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.long_return_max())
self.assertEquals(self.MIN, GIMarshallingTests.long_return_min())
def test_long_in(self):
max = Number(self.MAX)
min = Number(self.MIN)
GIMarshallingTests.long_in_max(max)
GIMarshallingTests.long_in_min(min)
max.value += 1
min.value -= 1
self.assertRaises(ValueError, GIMarshallingTests.long_in_max, max)
self.assertRaises(ValueError, GIMarshallingTests.long_in_min, min)
self.assertRaises(TypeError, GIMarshallingTests.long_in_max, "self.MAX")
def test_long_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.long_out_max())
self.assertEquals(self.MIN, GIMarshallingTests.long_out_min())
def test_long_inout(self):
self.assertEquals(self.MIN, GIMarshallingTests.long_inout_max_min(Number(self.MAX)))
self.assertEquals(self.MAX, GIMarshallingTests.long_inout_min_max(Number(self.MIN)))
class TestULong(unittest.TestCase):
MAX = GObject.constants.G_MAXULONG
def test_ulong_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.ulong_return())
def test_ulong_in(self):
number = Number(self.MAX)
GIMarshallingTests.ulong_in(number)
number.value += 1
self.assertRaises(ValueError, GIMarshallingTests.ulong_in, number)
self.assertRaises(ValueError, GIMarshallingTests.ulong_in, Number(-1))
self.assertRaises(TypeError, GIMarshallingTests.ulong_in, "self.MAX")
def test_ulong_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.ulong_out())
def test_ulong_inout(self):
self.assertEquals(0, GIMarshallingTests.ulong_inout(Number(self.MAX)))
class TestSSize(unittest.TestCase):
MAX = GObject.constants.G_MAXLONG
MIN = GObject.constants.G_MINLONG
def test_ssize_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.ssize_return_max())
self.assertEquals(self.MIN, GIMarshallingTests.ssize_return_min())
def test_ssize_in(self):
max = Number(self.MAX)
min = Number(self.MIN)
GIMarshallingTests.ssize_in_max(max)
GIMarshallingTests.ssize_in_min(min)
max.value += 1
min.value -= 1
self.assertRaises(ValueError, GIMarshallingTests.ssize_in_max, max)
self.assertRaises(ValueError, GIMarshallingTests.ssize_in_min, min)
self.assertRaises(TypeError, GIMarshallingTests.ssize_in_max, "self.MAX")
def test_ssize_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.ssize_out_max())
self.assertEquals(self.MIN, GIMarshallingTests.ssize_out_min())
def test_ssize_inout(self):
self.assertEquals(self.MIN, GIMarshallingTests.ssize_inout_max_min(Number(self.MAX)))
self.assertEquals(self.MAX, GIMarshallingTests.ssize_inout_min_max(Number(self.MIN)))
class TestSize(unittest.TestCase):
MAX = GObject.constants.G_MAXULONG
def test_size_return(self):
self.assertEquals(self.MAX, GIMarshallingTests.size_return())
def test_size_in(self):
number = Number(self.MAX)
GIMarshallingTests.size_in(number)
number.value += 1
self.assertRaises(ValueError, GIMarshallingTests.size_in, number)
self.assertRaises(ValueError, GIMarshallingTests.size_in, Number(-1))
self.assertRaises(TypeError, GIMarshallingTests.size_in, "self.MAX")
def test_size_out(self):
self.assertEquals(self.MAX, GIMarshallingTests.size_out())
def test_size_inout(self):
self.assertEquals(0, GIMarshallingTests.size_inout(Number(self.MAX)))
class TestFloat(unittest.TestCase):
MAX = GObject.constants.G_MAXFLOAT
MIN = GObject.constants.G_MINFLOAT
def test_float_return(self):
self.assertAlmostEquals(self.MAX, GIMarshallingTests.float_return())
def test_float_in(self):
GIMarshallingTests.float_in(Number(self.MAX))
self.assertRaises(TypeError, GIMarshallingTests.float_in, "self.MAX")
def test_float_out(self):
self.assertAlmostEquals(self.MAX, GIMarshallingTests.float_out())
def test_float_inout(self):
self.assertAlmostEquals(self.MIN, GIMarshallingTests.float_inout(Number(self.MAX)))
class TestDouble(unittest.TestCase):
MAX = GObject.constants.G_MAXDOUBLE
MIN = GObject.constants.G_MINDOUBLE
def test_double_return(self):
self.assertAlmostEquals(self.MAX, GIMarshallingTests.double_return())
def test_double_in(self):
GIMarshallingTests.double_in(Number(self.MAX))
self.assertRaises(TypeError, GIMarshallingTests.double_in, "self.MAX")
def test_double_out(self):
self.assertAlmostEquals(self.MAX, GIMarshallingTests.double_out())
def test_double_inout(self):
self.assertAlmostEquals(self.MIN, GIMarshallingTests.double_inout(Number(self.MAX)))
class TestGType(unittest.TestCase):
def test_gtype_return(self):
self.assertEquals(GObject.TYPE_NONE, GIMarshallingTests.gtype_return())
def test_gtype_in(self):
GIMarshallingTests.gtype_in(GObject.TYPE_NONE)
self.assertRaises(TypeError, GIMarshallingTests.gtype_in, "GObject.TYPE_NONE")
def test_gtype_out(self):
self.assertEquals(GObject.TYPE_NONE, GIMarshallingTests.gtype_out())
def test_gtype_inout(self):
self.assertEquals(GObject.TYPE_INT, GIMarshallingTests.gtype_inout(GObject.TYPE_NONE))
class TestUtf8(unittest.TestCase):
def test_utf8_none_return(self):
self.assertEquals(CONSTANT_UTF8, GIMarshallingTests.utf8_none_return())
def test_utf8_full_return(self):
self.assertEquals(CONSTANT_UTF8, GIMarshallingTests.utf8_full_return())
def test_utf8_none_in(self):
GIMarshallingTests.utf8_none_in(CONSTANT_UTF8)
if sys.version_info < (3, 0):
GIMarshallingTests.utf8_none_in(PY2_UNICODE_UTF8)
self.assertRaises(TypeError, GIMarshallingTests.utf8_none_in, CONSTANT_NUMBER)
self.assertRaises(TypeError, GIMarshallingTests.utf8_none_in, None)
def test_utf8_none_out(self):
self.assertEquals(CONSTANT_UTF8, GIMarshallingTests.utf8_none_out())
def test_utf8_full_out(self):
self.assertEquals(CONSTANT_UTF8, GIMarshallingTests.utf8_full_out())
def test_utf8_dangling_out(self):
GIMarshallingTests.utf8_dangling_out()
def test_utf8_none_inout(self):
self.assertEquals("", GIMarshallingTests.utf8_none_inout(CONSTANT_UTF8))
def test_utf8_full_inout(self):
self.assertEquals("", GIMarshallingTests.utf8_full_inout(CONSTANT_UTF8))
class TestArray(unittest.TestCase):
def test_array_fixed_int_return(self):
self.assertEquals([-1, 0, 1, 2], GIMarshallingTests.array_fixed_int_return())
def test_array_fixed_short_return(self):
self.assertEquals([-1, 0, 1, 2], GIMarshallingTests.array_fixed_short_return())
def test_array_fixed_int_in(self):
GIMarshallingTests.array_fixed_int_in(Sequence([-1, 0, 1, 2]))
self.assertRaises(TypeError, GIMarshallingTests.array_fixed_int_in, Sequence([-1, '0', 1, 2]))
self.assertRaises(TypeError, GIMarshallingTests.array_fixed_int_in, 42)
self.assertRaises(TypeError, GIMarshallingTests.array_fixed_int_in, None)
def test_array_fixed_short_in(self):
GIMarshallingTests.array_fixed_short_in(Sequence([-1, 0, 1, 2]))
def test_array_fixed_out(self):
self.assertEquals([-1, 0, 1, 2], GIMarshallingTests.array_fixed_out())
def test_array_fixed_inout(self):
self.assertEquals([2, 1, 0, -1], GIMarshallingTests.array_fixed_inout([-1, 0, 1, 2]))
def test_array_return(self):
self.assertEquals([-1, 0, 1, 2], GIMarshallingTests.array_return())
def test_array_in(self):
GIMarshallingTests.array_in(Sequence([-1, 0, 1, 2]))
def test_array_uint8_in(self):
GIMarshallingTests.array_uint8_in(Sequence([97, 98, 99, 100]))
GIMarshallingTests.array_uint8_in(_bytes("abcd"))
def test_array_out(self):
self.assertEquals([-1, 0, 1, 2], GIMarshallingTests.array_out())
def test_array_inout(self):
self.assertEquals([-2, -1, 0, 1, 2], GIMarshallingTests.array_inout(Sequence([-1, 0, 1, 2])))
def test_method_array_in(self):
object_ = GIMarshallingTests.Object()
object_.method_array_in(Sequence([-1, 0, 1, 2]))
def test_method_array_out(self):
object_ = GIMarshallingTests.Object()
self.assertEquals([-1, 0, 1, 2], object_.method_array_out())
def test_method_array_inout(self):
object_ = GIMarshallingTests.Object()
self.assertEquals([-2, -1, 0, 1, 2], object_.method_array_inout(Sequence([-1, 0, 1, 2])))
def test_method_array_return(self):
object_ = GIMarshallingTests.Object()
self.assertEquals([-1, 0, 1, 2], object_.method_array_return())
def test_array_fixed_out_struct(self):
struct1, struct2 = GIMarshallingTests.array_fixed_out_struct()
self.assertEquals(7, struct1.long_)
self.assertEquals(6, struct1.int8)
self.assertEquals(6, struct2.long_)
self.assertEquals(7, struct2.int8)
def test_array_zero_terminated_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.array_zero_terminated_return())
def test_array_zero_terminated_in(self):
GIMarshallingTests.array_zero_terminated_in(Sequence(['0', '1', '2']))
def test_array_zero_terminated_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.array_zero_terminated_out())
def test_array_zero_terminated_inout(self):
self.assertEquals(['-1', '0', '1', '2'], GIMarshallingTests.array_zero_terminated_inout(['0', '1', '2']))
def test_gstrv_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gstrv_return())
def test_gstrv_in(self):
GIMarshallingTests.gstrv_in(Sequence(['0', '1', '2']))
def test_gstrv_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gstrv_out())
def test_gstrv_inout(self):
self.assertEquals(['-1', '0', '1', '2'], GIMarshallingTests.gstrv_inout(['0', '1', '2']))
class TestGArray(unittest.TestCase):
def test_garray_int_none_return(self):
self.assertEquals([-1, 0, 1, 2], GIMarshallingTests.garray_int_none_return())
def test_garray_utf8_none_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.garray_utf8_none_return())
def test_garray_utf8_container_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.garray_utf8_container_return())
def test_garray_utf8_full_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.garray_utf8_full_return())
def test_garray_int_none_in(self):
GIMarshallingTests.garray_int_none_in(Sequence([-1, 0, 1, 2]))
self.assertRaises(TypeError, GIMarshallingTests.garray_int_none_in, Sequence([-1, '0', 1, 2]))
self.assertRaises(TypeError, GIMarshallingTests.garray_int_none_in, 42)
self.assertRaises(TypeError, GIMarshallingTests.garray_int_none_in, None)
def test_garray_utf8_none_in(self):
GIMarshallingTests.garray_utf8_none_in(Sequence(['0', '1', '2']))
def test_garray_utf8_none_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.garray_utf8_none_out())
def test_garray_utf8_container_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.garray_utf8_container_out())
def test_garray_utf8_full_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.garray_utf8_full_out())
def test_garray_utf8_none_inout(self):
self.assertEquals(['-2', '-1', '0', '1'], GIMarshallingTests.garray_utf8_none_inout(Sequence(('0', '1', '2'))))
def test_garray_utf8_container_inout(self):
self.assertEquals(['-2', '-1','0', '1'], GIMarshallingTests.garray_utf8_container_inout(['0', '1', '2']))
def test_garray_utf8_full_inout(self):
self.assertEquals(['-2', '-1','0', '1'], GIMarshallingTests.garray_utf8_full_inout(['0', '1', '2']))
class TestGPtrArray(unittest.TestCase):
def test_gptrarray_int_none_return(self):
self.assertEquals([0, 1, 2, 3], GIMarshallingTests.gptrarray_int_none_return())
def test_gptrarray_utf8_none_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gptrarray_utf8_none_return())
def test_gptrarray_utf8_container_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gptrarray_utf8_container_return())
def test_gptrarray_utf8_full_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gptrarray_utf8_full_return())
def test_gptrarray_int_none_in(self):
GIMarshallingTests.gptrarray_int_none_in(Sequence([0, 1, 2, 3]))
self.assertRaises(TypeError, GIMarshallingTests.gptrarray_int_none_in, Sequence([-1, '0', 1, 2]))
self.assertRaises(TypeError, GIMarshallingTests.gptrarray_int_none_in, 42)
self.assertRaises(TypeError, GIMarshallingTests.gptrarray_int_none_in, None)
def test_gptrarray_utf8_none_in(self):
GIMarshallingTests.gptrarray_utf8_none_in(Sequence(['0', '1', '2']))
def test_gptrarray_utf8_none_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gptrarray_utf8_none_out())
def test_gptrarray_utf8_container_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gptrarray_utf8_container_out())
def test_gptrarray_utf8_full_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gptrarray_utf8_full_out())
def test_gptrarray_utf8_none_inout(self):
self.assertEquals(['-2', '-1', '0', '1'], GIMarshallingTests.gptrarray_utf8_none_inout(Sequence(('0', '1', '2'))))
def test_gptrarray_utf8_container_inout(self):
self.assertEquals(['-2', '-1','0', '1'], GIMarshallingTests.gptrarray_utf8_container_inout(['0', '1', '2']))
def test_gptrarray_utf8_full_inout(self):
self.assertEquals(['-2', '-1','0', '1'], GIMarshallingTests.gptrarray_utf8_full_inout(['0', '1', '2']))
class TestGList(unittest.TestCase):
def test_glist_int_none_return(self):
self.assertEquals([-1, 0, 1, 2], GIMarshallingTests.glist_int_none_return())
def test_glist_utf8_none_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.glist_utf8_none_return())
def test_glist_utf8_container_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.glist_utf8_container_return())
def test_glist_utf8_full_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.glist_utf8_full_return())
def test_glist_int_none_in(self):
GIMarshallingTests.glist_int_none_in(Sequence((-1, 0, 1, 2)))
self.assertRaises(TypeError, GIMarshallingTests.glist_int_none_in, Sequence((-1, '0', 1, 2)))
self.assertRaises(TypeError, GIMarshallingTests.glist_int_none_in, 42)
self.assertRaises(TypeError, GIMarshallingTests.glist_int_none_in, None)
def test_glist_utf8_none_in(self):
GIMarshallingTests.glist_utf8_none_in(Sequence(('0', '1', '2')))
def test_glist_utf8_none_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.glist_utf8_none_out())
def test_glist_utf8_container_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.glist_utf8_container_out())
def test_glist_utf8_full_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.glist_utf8_full_out())
def test_glist_utf8_none_inout(self):
self.assertEquals(['-2', '-1', '0', '1'], GIMarshallingTests.glist_utf8_none_inout(Sequence(('0', '1', '2'))))
def test_glist_utf8_container_inout(self):
self.assertEquals(['-2', '-1','0', '1'], GIMarshallingTests.glist_utf8_container_inout(('0', '1', '2')))
def test_glist_utf8_full_inout(self):
self.assertEquals(['-2', '-1','0', '1'], GIMarshallingTests.glist_utf8_full_inout(('0', '1', '2')))
class TestGSList(unittest.TestCase):
def test_gslist_int_none_return(self):
self.assertEquals([-1, 0, 1, 2], GIMarshallingTests.gslist_int_none_return())
def test_gslist_utf8_none_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gslist_utf8_none_return())
def test_gslist_utf8_container_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gslist_utf8_container_return())
def test_gslist_utf8_full_return(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gslist_utf8_full_return())
def test_gslist_int_none_in(self):
GIMarshallingTests.gslist_int_none_in(Sequence((-1, 0, 1, 2)))
self.assertRaises(TypeError, GIMarshallingTests.gslist_int_none_in, Sequence((-1, '0', 1, 2)))
self.assertRaises(TypeError, GIMarshallingTests.gslist_int_none_in, 42)
self.assertRaises(TypeError, GIMarshallingTests.gslist_int_none_in, None)
def test_gslist_utf8_none_in(self):
GIMarshallingTests.gslist_utf8_none_in(Sequence(('0', '1', '2')))
def test_gslist_utf8_none_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gslist_utf8_none_out())
def test_gslist_utf8_container_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gslist_utf8_container_out())
def test_gslist_utf8_full_out(self):
self.assertEquals(['0', '1', '2'], GIMarshallingTests.gslist_utf8_full_out())
def test_gslist_utf8_none_inout(self):
self.assertEquals(['-2', '-1', '0', '1'], GIMarshallingTests.gslist_utf8_none_inout(Sequence(('0', '1', '2'))))
def test_gslist_utf8_container_inout(self):
self.assertEquals(['-2', '-1','0', '1'], GIMarshallingTests.gslist_utf8_container_inout(('0', '1', '2')))
def test_gslist_utf8_full_inout(self):
self.assertEquals(['-2', '-1','0', '1'], GIMarshallingTests.gslist_utf8_full_inout(('0', '1', '2')))
class TestGHashTable(unittest.TestCase):
def test_ghashtable_int_none_return(self):
self.assertEquals({-1: 1, 0: 0, 1: -1, 2: -2}, GIMarshallingTests.ghashtable_int_none_return())
def test_ghashtable_utf8_none_return(self):
self.assertEquals({'-1': '1', '0': '0', '1': '-1', '2': '-2'}, GIMarshallingTests.ghashtable_utf8_none_return())
def test_ghashtable_utf8_container_return(self):
self.assertEquals({'-1': '1', '0': '0', '1': '-1', '2': '-2'}, GIMarshallingTests.ghashtable_utf8_container_return())
def test_ghashtable_utf8_full_return(self):
self.assertEquals({'-1': '1', '0': '0', '1': '-1', '2': '-2'}, GIMarshallingTests.ghashtable_utf8_full_return())
def test_ghashtable_int_none_in(self):
GIMarshallingTests.ghashtable_int_none_in({-1: 1, 0: 0, 1: -1, 2: -2})
self.assertRaises(TypeError, GIMarshallingTests.ghashtable_int_none_in, {-1: 1, '0': 0, 1: -1, 2: -2})
self.assertRaises(TypeError, GIMarshallingTests.ghashtable_int_none_in, {-1: 1, 0: '0', 1: -1, 2: -2})
self.assertRaises(TypeError, GIMarshallingTests.ghashtable_int_none_in, '{-1: 1, 0: 0, 1: -1, 2: -2}')
self.assertRaises(TypeError, GIMarshallingTests.ghashtable_int_none_in, None)
def test_ghashtable_utf8_none_in(self):
GIMarshallingTests.ghashtable_utf8_none_in({'-1': '1', '0': '0', '1': '-1', '2': '-2'})
def test_ghashtable_utf8_none_out(self):
self.assertEquals({'-1': '1', '0': '0', '1': '-1', '2': '-2'}, GIMarshallingTests.ghashtable_utf8_none_out())
def test_ghashtable_utf8_container_out(self):
self.assertEquals({'-1': '1', '0': '0', '1': '-1', '2': '-2'}, GIMarshallingTests.ghashtable_utf8_container_out())
def test_ghashtable_utf8_full_out(self):
self.assertEquals({'-1': '1', '0': '0', '1': '-1', '2': '-2'}, GIMarshallingTests.ghashtable_utf8_full_out())
def test_ghashtable_utf8_none_inout(self):
self.assertEquals({'-1': '1', '0': '0', '1': '1'},
GIMarshallingTests.ghashtable_utf8_none_inout({'-1': '1', '0': '0', '1': '-1', '2': '-2'}))
def test_ghashtable_utf8_container_inout(self):
self.assertEquals({'-1': '1', '0': '0', '1': '1'},
GIMarshallingTests.ghashtable_utf8_container_inout({'-1': '1', '0': '0', '1': '-1', '2': '-2'}))
def test_ghashtable_utf8_full_inout(self):
self.assertEquals({'-1': '1', '0': '0', '1': '1'},
GIMarshallingTests.ghashtable_utf8_full_inout({'-1': '1', '0': '0', '1': '-1', '2': '-2'}))
class TestGValue(unittest.TestCase):
def test_gvalue_return(self):
self.assertEquals(42, GIMarshallingTests.gvalue_return())
def test_gvalue_in(self):
GIMarshallingTests.gvalue_in(42)
value = GObject.Value()
value.init(GObject.TYPE_INT)
value.set_int(42)
GIMarshallingTests.gvalue_in(value)
def test_gvalue_out(self):
self.assertEquals(42, GIMarshallingTests.gvalue_out())
def test_gvalue_inout(self):
self.assertEquals('42', GIMarshallingTests.gvalue_inout(42))
value = GObject.Value()
value.init(GObject.TYPE_INT)
value.set_int(42)
self.assertEquals('42', GIMarshallingTests.gvalue_inout(value))
class TestGClosure(unittest.TestCase):
def test_gclosure_in(self):
GIMarshallingTests.gclosure_in(lambda: 42)
# test passing a closure between two C calls
closure = GIMarshallingTests.gclosure_return()
GIMarshallingTests.gclosure_in(closure)
self.assertRaises(TypeError, GIMarshallingTests.gclosure_in, 42)
self.assertRaises(TypeError, GIMarshallingTests.gclosure_in, None)
class TestPointer(unittest.TestCase):
def test_pointer_in_return(self):
self.assertEquals(GIMarshallingTests.pointer_in_return(42), 42)
class TestEnum(unittest.TestCase):
@classmethod
def setUpClass(cls):
'''Run tests under a test locale.
Upper case conversion of member names should not be locale specific;
e.g. in Turkish, "i".upper() == "i", which gives results like "iNVALiD".
Run tests under a locale which defines toupper('a') == 'a'.
'''
cls.locale_dir = tempfile.mkdtemp()
subprocess.check_call(['localedef', '-i',
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'te_ST@nouppera'),
'-c', '-f', 'UTF-8', os.path.join(cls.locale_dir, 'te_ST.UTF-8@nouppera')])
os.environ['LOCPATH'] = cls.locale_dir
locale.setlocale(locale.LC_ALL, 'te_ST.UTF-8@nouppera')
@classmethod
def tearDownClass(cls):
locale.setlocale(locale.LC_ALL, 'C')
shutil.rmtree(cls.locale_dir)
try:
del os.environ['LOCPATH']
except KeyError:
pass
def test_enum(self):
self.assertTrue(issubclass(GIMarshallingTests.Enum, int))
self.assertTrue(isinstance(GIMarshallingTests.Enum.VALUE1, GIMarshallingTests.Enum))
self.assertTrue(isinstance(GIMarshallingTests.Enum.VALUE2, GIMarshallingTests.Enum))
self.assertTrue(isinstance(GIMarshallingTests.Enum.VALUE3, GIMarshallingTests.Enum))
self.assertEquals(42, GIMarshallingTests.Enum.VALUE3)
def test_value_nick_and_name(self):
self.assertEqual(GIMarshallingTests.Enum.VALUE1.value_nick, 'value1')
self.assertEqual(GIMarshallingTests.Enum.VALUE2.value_nick, 'value2')
self.assertEqual(GIMarshallingTests.Enum.VALUE3.value_nick, 'value3')
self.assertEqual(GIMarshallingTests.Enum.VALUE1.value_name, 'GI_MARSHALLING_TESTS_ENUM_VALUE1')
self.assertEqual(GIMarshallingTests.Enum.VALUE2.value_name, 'GI_MARSHALLING_TESTS_ENUM_VALUE2')
self.assertEqual(GIMarshallingTests.Enum.VALUE3.value_name, 'GI_MARSHALLING_TESTS_ENUM_VALUE3')
def test_enum_in(self):
GIMarshallingTests.enum_in(GIMarshallingTests.Enum.VALUE3)
GIMarshallingTests.enum_in(42)
self.assertRaises(TypeError, GIMarshallingTests.enum_in, 43)
self.assertRaises(TypeError, GIMarshallingTests.enum_in, 'GIMarshallingTests.Enum.VALUE3')
def test_enum_out(self):
enum = GIMarshallingTests.enum_out()
self.assertTrue(isinstance(enum, GIMarshallingTests.Enum))
self.assertEquals(enum, GIMarshallingTests.Enum.VALUE3)
def test_enum_inout(self):
enum = GIMarshallingTests.enum_inout(GIMarshallingTests.Enum.VALUE3)
self.assertTrue(isinstance(enum, GIMarshallingTests.Enum))
self.assertEquals(enum, GIMarshallingTests.Enum.VALUE1)
def test_enum_second(self):
# check for the bug where different non-gtype enums share the same class
self.assertNotEqual(GIMarshallingTests.Enum, GIMarshallingTests.SecondEnum)
# check that values are not being shared between different enums
self.assertTrue(hasattr(GIMarshallingTests.SecondEnum, "SECONDVALUE1"))
self.assertRaises(AttributeError, getattr, GIMarshallingTests.Enum, "SECONDVALUE1")
self.assertTrue(hasattr(GIMarshallingTests.Enum, "VALUE1"))
self.assertRaises(AttributeError, getattr, GIMarshallingTests.SecondEnum, "VALUE1")
class TestGEnum(unittest.TestCase):
def test_genum(self):
self.assertTrue(issubclass(GIMarshallingTests.GEnum, GObject.GEnum))
self.assertTrue(isinstance(GIMarshallingTests.GEnum.VALUE1, GIMarshallingTests.GEnum))
self.assertTrue(isinstance(GIMarshallingTests.GEnum.VALUE2, GIMarshallingTests.GEnum))
self.assertTrue(isinstance(GIMarshallingTests.GEnum.VALUE3, GIMarshallingTests.GEnum))
self.assertEquals(42, GIMarshallingTests.GEnum.VALUE3)
def test_value_nick_and_name(self):
self.assertEqual(GIMarshallingTests.GEnum.VALUE1.value_nick, 'value1')
self.assertEqual(GIMarshallingTests.GEnum.VALUE2.value_nick, 'value2')
self.assertEqual(GIMarshallingTests.GEnum.VALUE3.value_nick, 'value3')
self.assertEqual(GIMarshallingTests.GEnum.VALUE1.value_name, 'GI_MARSHALLING_TESTS_GENUM_VALUE1')
self.assertEqual(GIMarshallingTests.GEnum.VALUE2.value_name, 'GI_MARSHALLING_TESTS_GENUM_VALUE2')
self.assertEqual(GIMarshallingTests.GEnum.VALUE3.value_name, 'GI_MARSHALLING_TESTS_GENUM_VALUE3')
def test_genum_in(self):
GIMarshallingTests.genum_in(GIMarshallingTests.GEnum.VALUE3)
GIMarshallingTests.genum_in(42)
self.assertRaises(TypeError, GIMarshallingTests.genum_in, 43)
self.assertRaises(TypeError, GIMarshallingTests.genum_in, 'GIMarshallingTests.GEnum.VALUE3')
def test_genum_out(self):
genum = GIMarshallingTests.genum_out()
self.assertTrue(isinstance(genum, GIMarshallingTests.GEnum))
self.assertEquals(genum, GIMarshallingTests.GEnum.VALUE3)
def test_genum_inout(self):
genum = GIMarshallingTests.genum_inout(GIMarshallingTests.GEnum.VALUE3)
self.assertTrue(isinstance(genum, GIMarshallingTests.GEnum))
self.assertEquals(genum, GIMarshallingTests.GEnum.VALUE1)
class TestGFlags(unittest.TestCase):
def test_flags(self):
self.assertTrue(issubclass(GIMarshallingTests.Flags, GObject.GFlags))
self.assertTrue(isinstance(GIMarshallingTests.Flags.VALUE1, GIMarshallingTests.Flags))
self.assertTrue(isinstance(GIMarshallingTests.Flags.VALUE2, GIMarshallingTests.Flags))
self.assertTrue(isinstance(GIMarshallingTests.Flags.VALUE3, GIMarshallingTests.Flags))
# __or__() operation should still return an instance, not an int.
self.assertTrue(isinstance(GIMarshallingTests.Flags.VALUE1 | GIMarshallingTests.Flags.VALUE2,
GIMarshallingTests.Flags))
self.assertEquals(1 << 1, GIMarshallingTests.Flags.VALUE2)
def test_value_nick_and_name(self):
self.assertEqual(GIMarshallingTests.Flags.VALUE1.first_value_nick, 'value1')
self.assertEqual(GIMarshallingTests.Flags.VALUE2.first_value_nick, 'value2')
self.assertEqual(GIMarshallingTests.Flags.VALUE3.first_value_nick, 'value3')
self.assertEqual(GIMarshallingTests.Flags.VALUE1.first_value_name, 'GI_MARSHALLING_TESTS_FLAGS_VALUE1')
self.assertEqual(GIMarshallingTests.Flags.VALUE2.first_value_name, 'GI_MARSHALLING_TESTS_FLAGS_VALUE2')
self.assertEqual(GIMarshallingTests.Flags.VALUE3.first_value_name, 'GI_MARSHALLING_TESTS_FLAGS_VALUE3')
def test_flags_in(self):
GIMarshallingTests.flags_in(GIMarshallingTests.Flags.VALUE2)
# The result of the __or__() operation should still be a valid instance, not an int.
GIMarshallingTests.flags_in(GIMarshallingTests.Flags.VALUE2 | GIMarshallingTests.Flags.VALUE2)
GIMarshallingTests.flags_in_zero(Number(0))
self.assertRaises(TypeError, GIMarshallingTests.flags_in, 1 << 1)
self.assertRaises(TypeError, GIMarshallingTests.flags_in, 'GIMarshallingTests.Flags.VALUE2')
def test_flags_out(self):
flags = GIMarshallingTests.flags_out()
self.assertTrue(isinstance(flags, GIMarshallingTests.Flags))
self.assertEquals(flags, GIMarshallingTests.Flags.VALUE2)
def test_flags_inout(self):
flags = GIMarshallingTests.flags_inout(GIMarshallingTests.Flags.VALUE2)
self.assertTrue(isinstance(flags, GIMarshallingTests.Flags))
self.assertEquals(flags, GIMarshallingTests.Flags.VALUE1)
class TestNoTypeFlags(unittest.TestCase):
def test_flags(self):
self.assertTrue(issubclass(GIMarshallingTests.NoTypeFlags, GObject.GFlags))
self.assertTrue(isinstance(GIMarshallingTests.NoTypeFlags.VALUE1, GIMarshallingTests.NoTypeFlags))
self.assertTrue(isinstance(GIMarshallingTests.NoTypeFlags.VALUE2, GIMarshallingTests.NoTypeFlags))
self.assertTrue(isinstance(GIMarshallingTests.NoTypeFlags.VALUE3, GIMarshallingTests.NoTypeFlags))
# __or__() operation should still return an instance, not an int.
self.assertTrue(isinstance(GIMarshallingTests.NoTypeFlags.VALUE1 | GIMarshallingTests.NoTypeFlags.VALUE2,
GIMarshallingTests.NoTypeFlags))
self.assertEquals(1 << 1, GIMarshallingTests.NoTypeFlags.VALUE2)
def test_value_nick_and_name(self):
self.assertEqual(GIMarshallingTests.NoTypeFlags.VALUE1.first_value_nick, 'value1')
self.assertEqual(GIMarshallingTests.NoTypeFlags.VALUE2.first_value_nick, 'value2')
self.assertEqual(GIMarshallingTests.NoTypeFlags.VALUE3.first_value_nick, 'value3')
self.assertEqual(GIMarshallingTests.NoTypeFlags.VALUE1.first_value_name, 'GI_MARSHALLING_TESTS_NO_TYPE_FLAGS_VALUE1')
self.assertEqual(GIMarshallingTests.NoTypeFlags.VALUE2.first_value_name, 'GI_MARSHALLING_TESTS_NO_TYPE_FLAGS_VALUE2')
self.assertEqual(GIMarshallingTests.NoTypeFlags.VALUE3.first_value_name, 'GI_MARSHALLING_TESTS_NO_TYPE_FLAGS_VALUE3')
def test_flags_in(self):
GIMarshallingTests.no_type_flags_in(GIMarshallingTests.NoTypeFlags.VALUE2)
GIMarshallingTests.no_type_flags_in(GIMarshallingTests.NoTypeFlags.VALUE2 | GIMarshallingTests.NoTypeFlags.VALUE2)
GIMarshallingTests.no_type_flags_in_zero(Number(0))
self.assertRaises(TypeError, GIMarshallingTests.no_type_flags_in, 1 << 1)
self.assertRaises(TypeError, GIMarshallingTests.no_type_flags_in, 'GIMarshallingTests.NoTypeFlags.VALUE2')
def test_flags_out(self):
flags = GIMarshallingTests.no_type_flags_out()
self.assertTrue(isinstance(flags, GIMarshallingTests.NoTypeFlags))
self.assertEquals(flags, GIMarshallingTests.NoTypeFlags.VALUE2)
def test_flags_inout(self):
flags = GIMarshallingTests.no_type_flags_inout(GIMarshallingTests.NoTypeFlags.VALUE2)
self.assertTrue(isinstance(flags, GIMarshallingTests.NoTypeFlags))
self.assertEquals(flags, GIMarshallingTests.NoTypeFlags.VALUE1)
class TestStructure(unittest.TestCase):
def test_simple_struct(self):
self.assertTrue(issubclass(GIMarshallingTests.SimpleStruct, GObject.GPointer))
struct = GIMarshallingTests.SimpleStruct()
self.assertTrue(isinstance(struct, GIMarshallingTests.SimpleStruct))
self.assertEquals(0, struct.long_)
self.assertEquals(0, struct.int8)
struct.long_ = 6
struct.int8 = 7
self.assertEquals(6, struct.long_)
self.assertEquals(7, struct.int8)
del struct
def test_nested_struct(self):
struct = GIMarshallingTests.NestedStruct()
self.assertTrue(isinstance(struct.simple_struct, GIMarshallingTests.SimpleStruct))
struct.simple_struct.long_ = 42
self.assertEquals(42, struct.simple_struct.long_)
del struct
def test_not_simple_struct(self):
struct = GIMarshallingTests.NotSimpleStruct()
self.assertEquals(None, struct.pointer)
def test_simple_struct_return(self):
struct = GIMarshallingTests.simple_struct_returnv()
self.assertTrue(isinstance(struct, GIMarshallingTests.SimpleStruct))
self.assertEquals(6, struct.long_)
self.assertEquals(7, struct.int8)
del struct
def test_simple_struct_in(self):
struct = GIMarshallingTests.SimpleStruct()
struct.long_ = 6
struct.int8 = 7
GIMarshallingTests.SimpleStruct.inv(struct)
del struct
struct = GIMarshallingTests.NestedStruct()
self.assertRaises(TypeError, GIMarshallingTests.SimpleStruct.inv, struct)
del struct
self.assertRaises(TypeError, GIMarshallingTests.SimpleStruct.inv, None)
def test_simple_struct_method(self):
struct = GIMarshallingTests.SimpleStruct()
struct.long_ = 6
struct.int8 = 7
struct.method()
del struct
self.assertRaises(TypeError, GIMarshallingTests.SimpleStruct.method)
def test_pointer_struct(self):
self.assertTrue(issubclass(GIMarshallingTests.PointerStruct, GObject.GPointer))
struct = GIMarshallingTests.PointerStruct()
self.assertTrue(isinstance(struct, GIMarshallingTests.PointerStruct))
del struct
def test_pointer_struct_return(self):
struct = GIMarshallingTests.pointer_struct_returnv()
self.assertTrue(isinstance(struct, GIMarshallingTests.PointerStruct))
self.assertEquals(42, struct.long_)
del struct
def test_pointer_struct_in(self):
struct = GIMarshallingTests.PointerStruct()
struct.long_ = 42
struct.inv()
del struct
def test_boxed_struct(self):
self.assertTrue(issubclass(GIMarshallingTests.BoxedStruct, GObject.GBoxed))
struct = GIMarshallingTests.BoxedStruct()
self.assertTrue(isinstance(struct, GIMarshallingTests.BoxedStruct))
self.assertEquals(0, struct.long_)
self.assertEquals([], struct.g_strv)
del struct
def test_boxed_struct_new(self):
struct = GIMarshallingTests.BoxedStruct.new()
self.assertTrue(isinstance(struct, GIMarshallingTests.BoxedStruct))
del struct
def test_boxed_struct_copy(self):
struct = GIMarshallingTests.BoxedStruct()
new_struct = struct.copy()
self.assertTrue(isinstance(new_struct, GIMarshallingTests.BoxedStruct))
del new_struct
del struct
def test_boxed_struct_return(self):
struct = GIMarshallingTests.boxed_struct_returnv()
self.assertTrue(isinstance(struct, GIMarshallingTests.BoxedStruct))
self.assertEquals(42, struct.long_)
self.assertEquals(['0', '1', '2'], struct.g_strv)
del struct
def test_boxed_struct_in(self):
struct = GIMarshallingTests.BoxedStruct()
struct.long_ = 42
struct.inv()
del struct
def test_boxed_struct_out(self):
struct = GIMarshallingTests.boxed_struct_out()
self.assertTrue(isinstance(struct, GIMarshallingTests.BoxedStruct))
self.assertEquals(42, struct.long_)
del struct
def test_boxed_struct_inout(self):
in_struct = GIMarshallingTests.BoxedStruct()
in_struct.long_ = 42
out_struct = GIMarshallingTests.boxed_struct_inout(in_struct)
self.assertTrue(isinstance(out_struct, GIMarshallingTests.BoxedStruct))
self.assertEquals(0, out_struct.long_)
del in_struct
del out_struct
def test_union(self):
union = GIMarshallingTests.Union()
self.assertTrue(isinstance(union, GIMarshallingTests.Union))
new_union = union.copy()
self.assertTrue(isinstance(new_union, GIMarshallingTests.Union))
del union
del new_union
def test_union_return(self):
union = GIMarshallingTests.union_returnv()
self.assertTrue(isinstance(union, GIMarshallingTests.Union))
self.assertEquals(42, union.long_)
del union
def test_union_in(self):
union = GIMarshallingTests.Union()
union.long_ = 42
union.inv()
del union
def test_union_method(self):
union = GIMarshallingTests.Union()
union.long_ = 42
union.method()
del union
self.assertRaises(TypeError, GIMarshallingTests.Union.method)
class TestGObject(unittest.TestCase):
def test_object(self):
self.assertTrue(issubclass(GIMarshallingTests.Object, GObject.GObject))
object_ = GIMarshallingTests.Object()
self.assertTrue(isinstance(object_, GIMarshallingTests.Object))
self.assertEquals(object_.__grefcount__, 1)
def test_object_new(self):
object_ = GIMarshallingTests.Object.new(42)
self.assertTrue(isinstance(object_, GIMarshallingTests.Object))
self.assertEquals(object_.__grefcount__, 1)
def test_object_int(self):
object_ = GIMarshallingTests.Object(int = 42)
self.assertEquals(object_.int_, 42)
# FIXME: Doesn't work yet.
# object_.int_ = 0
# self.assertEquals(object_.int_, 0)
def test_object_static_method(self):
GIMarshallingTests.Object.static_method()
def test_object_method(self):
GIMarshallingTests.Object(int = 42).method()
self.assertRaises(TypeError, GIMarshallingTests.Object.method, GObject.GObject())
self.assertRaises(TypeError, GIMarshallingTests.Object.method)
def test_sub_object(self):
self.assertTrue(issubclass(GIMarshallingTests.SubObject, GIMarshallingTests.Object))
object_ = GIMarshallingTests.SubObject()
self.assertTrue(isinstance(object_, GIMarshallingTests.SubObject))
def test_sub_object_new(self):
self.assertRaises(TypeError, GIMarshallingTests.SubObject.new, 42)
def test_sub_object_static_method(self):
object_ = GIMarshallingTests.SubObject()
object_.static_method()
def test_sub_object_method(self):
object_ = GIMarshallingTests.SubObject(int = 42)
object_.method()
def test_sub_object_sub_method(self):
object_ = GIMarshallingTests.SubObject()
object_.sub_method()
def test_sub_object_overwritten_method(self):
object_ = GIMarshallingTests.SubObject()
object_.overwritten_method()
self.assertRaises(TypeError, GIMarshallingTests.SubObject.overwritten_method, GIMarshallingTests.Object())
def test_sub_object_int(self):
object_ = GIMarshallingTests.SubObject()
self.assertEquals(object_.int_, 0)
# FIXME: Doesn't work yet.
# object_.int_ = 42
# self.assertEquals(object_.int_, 42)
def test_object_none_return(self):
object_ = GIMarshallingTests.Object.none_return()
self.assertTrue(isinstance(object_, GIMarshallingTests.Object))
self.assertEquals(object_.__grefcount__, 2)
def test_object_full_return(self):
object_ = GIMarshallingTests.Object.full_return()
self.assertTrue(isinstance(object_, GIMarshallingTests.Object))
self.assertEquals(object_.__grefcount__, 1)
def test_object_none_in(self):
object_ = GIMarshallingTests.Object(int = 42)
GIMarshallingTests.Object.none_in(object_)
self.assertEquals(object_.__grefcount__, 1)
object_ = GIMarshallingTests.SubObject(int = 42)
GIMarshallingTests.Object.none_in(object_)
object_ = GObject.GObject()
self.assertRaises(TypeError, GIMarshallingTests.Object.none_in, object_)
self.assertRaises(TypeError, GIMarshallingTests.Object.none_in, None)
def test_object_none_out(self):
object_ = GIMarshallingTests.Object.none_out()
self.assertTrue(isinstance(object_, GIMarshallingTests.Object))
self.assertEquals(object_.__grefcount__, 2)
new_object = GIMarshallingTests.Object.none_out()
self.assertTrue(new_object is object_)
def test_object_full_out(self):
object_ = GIMarshallingTests.Object.full_out()
self.assertTrue(isinstance(object_, GIMarshallingTests.Object))
self.assertEquals(object_.__grefcount__, 1)
def test_object_none_inout(self):
object_ = GIMarshallingTests.Object(int = 42)
new_object = GIMarshallingTests.Object.none_inout(object_)
self.assertTrue(isinstance(new_object, GIMarshallingTests.Object))
self.assertFalse(object_ is new_object)
self.assertEquals(object_.__grefcount__, 1)
self.assertEquals(new_object.__grefcount__, 2)
new_new_object = GIMarshallingTests.Object.none_inout(object_)
self.assertTrue(new_new_object is new_object)
GIMarshallingTests.Object.none_inout(GIMarshallingTests.SubObject(int = 42))
def test_object_full_inout(self):
object_ = GIMarshallingTests.Object(int = 42)
new_object = GIMarshallingTests.Object.full_inout(object_)
self.assertTrue(isinstance(new_object, GIMarshallingTests.Object))
self.assertFalse(object_ is new_object)
self.assertEquals(object_.__grefcount__, 2)
self.assertEquals(new_object.__grefcount__, 1)
# FIXME: Doesn't actually return the same object.
# def test_object_inout_same(self):
# object_ = GIMarshallingTests.Object()
# new_object = GIMarshallingTests.object_full_inout(object_)
# self.assertTrue(object_ is new_object)
# self.assertEquals(object_.__grefcount__, 1)
class TestPythonGObject(unittest.TestCase):
class Object(GIMarshallingTests.Object):
def __init__(self, int):
GIMarshallingTests.Object.__init__(self)
self.val = None
def method(self):
# Don't call super, which asserts that self.int == 42.
pass
def do_method_int8_in(self, int8):
self.val = int8
def do_method_int8_out(self):
return 42
def do_method_with_default_implementation(self, int8):
GIMarshallingTests.Object.do_method_with_default_implementation(self, int8)
self.props.int += int8
class SubObject(GIMarshallingTests.SubObject):
def __init__(self, int):
GIMarshallingTests.SubObject.__init__(self)
self.val = None
def do_method_with_default_implementation(self, int8):
self.val = int8
def test_object(self):
self.assertTrue(issubclass(self.Object, GIMarshallingTests.Object))
object_ = self.Object(int = 42)
self.assertTrue(isinstance(object_, self.Object))
def test_object_method(self):
self.Object(int = 0).method()
def test_object_vfuncs(self):
object_ = self.Object(int = 42)
object_.method_int8_in(84)
self.assertEqual(object_.val, 84)
self.assertEqual(object_.method_int8_out(), 42)
object_.method_with_default_implementation(42)
self.assertEqual(object_.props.int, 84)
class ObjectWithoutVFunc(GIMarshallingTests.Object):
def __init__(self, int):
GIMarshallingTests.Object.__init__(self)
object_ = ObjectWithoutVFunc(int = 42)
object_.method_with_default_implementation(84)
self.assertEqual(object_.props.int, 84)
def test_subobject_parent_vfunc(self):
object_ = self.SubObject(int = 81)
object_.method_with_default_implementation(87)
self.assertEquals(object_.val, 87)
def test_dynamic_module(self):
from gi.module import DynamicGObjectModule
self.assertTrue(isinstance(GObject, DynamicGObjectModule))
# compare the same enum from both the pygobject attrs and gi GObject attrs
self.assertEquals(GObject.SIGNAL_ACTION, GObject.SignalFlags.ACTION)
# compare a static gobject attr with a dynamic GObject attr
self.assertEquals(GObject.GObject, gobject.GObject)
def test_subobject_non_vfunc_do_method(self):
class PythonObjectWithNonVFuncDoMethod:
def do_not_a_vfunc(self):
return 5
class ObjectOverrideNonVFuncDoMethod(GIMarshallingTests.Object, PythonObjectWithNonVFuncDoMethod):
def do_not_a_vfunc(self):
value = super(ObjectOverrideNonVFuncDoMethod, self).do_not_a_vfunc()
return 13 + value
object_ = ObjectOverrideNonVFuncDoMethod()
self.assertEquals(18, object_.do_not_a_vfunc())
def test_native_function_not_set_in_subclass_dict(self):
# Previously, GI was setting virtual functions on the class as well
# as any *native* class that subclasses it. Here we check that they are only
# set on the class that the method is originally from.
self.assertTrue('do_method_with_default_implementation' in GIMarshallingTests.Object.__dict__)
self.assertTrue('do_method_with_default_implementation' not in GIMarshallingTests.SubObject.__dict__)
# Here we check that accessing a vfunc from the subclass returns the same wrapper object,
# meaning that multiple wrapper objects have not been created for the same vfunc.
func1 = GIMarshallingTests.Object.do_method_with_default_implementation
func2 = GIMarshallingTests.SubObject.do_method_with_default_implementation
if sys.version_info < (3,0):
func1 = func1.im_func
func2 = func2.im_func
self.assertTrue(func1 is func2)
def test_subobject_with_interface_and_non_vfunc_do_method(self):
# There was a bug for searching for vfuncs in interfaces. It was
# triggered by having a do_* method that wasn't overriding
# a native vfunc, as well as inheriting from an interface.
class GObjectSubclassWithInterface(GObject.GObject, GIMarshallingTests.Interface):
def do_method_not_a_vfunc(self):
pass
class TestMultiOutputArgs(unittest.TestCase):
def test_int_out_out(self):
self.assertEquals((6, 7), GIMarshallingTests.int_out_out())
def test_int_return_out(self):
self.assertEquals((6, 7), GIMarshallingTests.int_return_out())
class TestGErrorException(unittest.TestCase):
def test_gerror_exception(self):
self.assertRaises(GObject.GError, GIMarshallingTests.gerror)
try:
GIMarshallingTests.gerror()
except Exception:
etype, e = sys.exc_info()[:2]
self.assertEquals(e.domain, GIMarshallingTests.CONSTANT_GERROR_DOMAIN)
self.assertEquals(e.code, GIMarshallingTests.CONSTANT_GERROR_CODE)
self.assertEquals(e.message, GIMarshallingTests.CONSTANT_GERROR_MESSAGE)
# Interface
class TestInterfaces(unittest.TestCase):
def test_wrapper(self):
self.assertTrue(issubclass(GIMarshallingTests.Interface, GObject.GInterface))
self.assertEquals(GIMarshallingTests.Interface.__gtype__.name, 'GIMarshallingTestsInterface')
self.assertRaises(NotImplementedError, GIMarshallingTests.Interface)
def test_implementation(self):
class TestInterfaceImpl(GObject.GObject, GIMarshallingTests.Interface):
def __init__(self):
GObject.GObject.__init__(self)
self.val = None
def do_test_int8_in(self, int8):
self.val = int8
self.assertTrue(issubclass(TestInterfaceImpl, GIMarshallingTests.Interface))
instance = TestInterfaceImpl()
self.assertTrue(isinstance(instance, GIMarshallingTests.Interface))
GIMarshallingTests.test_interface_test_int8_in(instance, 42)
self.assertEquals(instance.val, 42)
class TestInterfaceImplA(TestInterfaceImpl):
pass
class TestInterfaceImplB(TestInterfaceImplA):
pass
instance = TestInterfaceImplA()
GIMarshallingTests.test_interface_test_int8_in(instance, 42)
self.assertEquals(instance.val, 42)
def test_mro(self):
# there was a problem with Python bailing out because of
# http://en.wikipedia.org/wiki/Diamond_problem with interfaces,
# which shouldn't really be a problem.
class TestInterfaceImpl(GObject.GObject, GIMarshallingTests.Interface):
pass
class TestInterfaceImpl2(GIMarshallingTests.Interface,
TestInterfaceImpl):
pass
class TestInterfaceImpl3(TestInterfaceImpl,
GIMarshallingTests.Interface2):
pass
class TestInterfaceClash(unittest.TestCase):
def test_clash(self):
def create_clash():
class TestClash(GObject.GObject, GIMarshallingTests.Interface, GIMarshallingTests.Interface2):
def do_test_int8_in(self, int8):
pass
TestClash()
self.assertRaises(TypeError, create_clash)
class TestOverrides(unittest.TestCase):
def test_constant(self):
self.assertEquals(GIMarshallingTests.OVERRIDES_CONSTANT, 7)
def test_struct(self):
# Test that the constructor has been overridden.
struct = GIMarshallingTests.OverridesStruct(42)
self.assertTrue(isinstance(struct, GIMarshallingTests.OverridesStruct))
# Test that the method has been overridden.
self.assertEquals(6, struct.method())
del struct
# Test that the overrides wrapper has been registered.
struct = GIMarshallingTests.overrides_struct_returnv()
self.assertTrue(isinstance(struct, GIMarshallingTests.OverridesStruct))
del struct
def test_object(self):
# Test that the constructor has been overridden.
object_ = GIMarshallingTests.OverridesObject(42)
self.assertTrue(isinstance(object_, GIMarshallingTests.OverridesObject))
# Test that the alternate constructor has been overridden.
object_ = GIMarshallingTests.OverridesObject.new(42)
self.assertTrue(isinstance(object_, GIMarshallingTests.OverridesObject))
# Test that the method has been overridden.
self.assertEquals(6, object_.method())
# Test that the overrides wrapper has been registered.
object_ = GIMarshallingTests.OverridesObject.returnv()
self.assertTrue(isinstance(object_, GIMarshallingTests.OverridesObject))
def test_module_name(self):
self.assertEquals(GIMarshallingTests.OverridesStruct.__module__, 'gi.overrides.GIMarshallingTests')
self.assertEquals(GObject.InitiallyUnowned.__module__, 'gi.repository.GObject')
class TestDir(unittest.TestCase):
def test_members_list(self):
list = dir(GIMarshallingTests)
self.assertTrue('OverridesStruct' in list)
self.assertTrue('BoxedStruct' in list)
self.assertTrue('OVERRIDES_CONSTANT' in list)
self.assertTrue('GEnum' in list)
self.assertTrue('int32_return_max' in list)
def test_modules_list(self):
import gi.repository
list = dir(gi.repository)
self.assertTrue('GIMarshallingTests' in list)
# FIXME: test to see if a module which was not imported is in the list
# we should be listing every typelib we find, not just the ones
# which are imported
#
# to test this I recommend we compile a fake module which
# our tests would never import and check to see if it is
# in the list:
#
# self.assertTrue('DoNotImportDummyTests' in list)
class TestGErrorArrayInCrash(unittest.TestCase):
# Previously there was a bug in invoke, in which C arrays were unwrapped
# from inside GArrays to be passed to the C function. But when a GError was
# set, invoke would attempt to free the C array as if it were a GArray.
# This crash is only for C arrays. It does not happen for C functions which
# take in GArrays. See https://bugzilla.gnome.org/show_bug.cgi?id=642708
def test_gerror_array_in_crash(self):
self.assertRaises(GObject.GError, GIMarshallingTests.gerror_array_in, [1, 2, 3])
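# A hedged note, not part of the original suite: this module can be run with the
# standard unittest runner, assuming the GIMarshallingTests typelib used
# throughout these tests is installed and introspectable:
#
#     python -m unittest test_gi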
| alexef/pygobject | tests/test_gi.py | Python | lgpl-2.1 | 67,093 |
""""""
from __future__ import annotations
from typing import Any
from flask import g
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Column, ForeignKey
from whoosh.fields import STORED
from .base import AUDITABLE, EDITABLE, SEARCHABLE, SYSTEM
from .subjects import User
class OwnedMixin:
__index_to__ = (
("creator", ("creator",)),
("creator_name", (("creator_name", STORED),)),
("owner", ("owner",)),
("owner_name", (("owner_name", STORED),)),
)
def __init__(self, *args: list, **kwargs: dict[str, Any]):
try:
user = g.user
if not self.creator and not g.user.is_anonymous:
self.creator = user
if not self.owner and not g.user.is_anonymous:
self.owner = user
except (RuntimeError, AttributeError):
pass
@declared_attr
def creator_id(cls):
return Column(ForeignKey(User.id), info=SYSTEM)
@declared_attr
def creator(cls):
primary_join = f"User.id == {cls.__name__}.creator_id"
return relationship(
User,
primaryjoin=primary_join,
lazy="joined",
uselist=False,
info=SYSTEM | SEARCHABLE,
)
@property
def creator_name(self) -> str:
return str(self.creator) if self.creator else ""
@declared_attr
def owner_id(cls):
return Column(ForeignKey(User.id), info=EDITABLE | AUDITABLE)
@declared_attr
def owner(cls):
primary_join = f"User.id == {cls.__name__}.owner_id"
return relationship(
User,
primaryjoin=primary_join,
lazy="joined",
uselist=False,
info=EDITABLE | AUDITABLE | SEARCHABLE,
)
@property
def owner_name(self) -> str:
return str(self.owner) if self.owner else ""
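# A minimal usage sketch (hedged): ``Base`` and the ``Document`` model below are
# assumptions for illustration only, not part of this module.
#
#     from sqlalchemy import Column, Integer, Unicode
#
#     class Document(OwnedMixin, Base):
#         __tablename__ = "document"
#         id = Column(Integer, primary_key=True)
#         title = Column(Unicode(200))
#
# Constructed inside a Flask request with a non-anonymous ``g.user``, a Document
# records that user as both ``creator`` and ``owner``; both are joined-load
# relationships to ``User`` and are exposed to indexing via ``__index_to__``.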
| abilian/abilian-core | src/abilian/core/models/owned.py | Python | lgpl-2.1 | 1,952 |
"""
Load and save a set of chosen implementations.
@since: 0.27
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os
from zeroinstall import _
from zeroinstall.injector import model
from zeroinstall.injector.policy import Policy, get_deprecated_singleton_config
from zeroinstall.injector.model import process_binding, process_depends, binding_names, Command
from zeroinstall.injector.namespaces import XMLNS_IFACE
from zeroinstall.injector.qdom import Element, Prefixes
from zeroinstall.support import tasks
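# A rough usage sketch (hedged): load a saved selections document, fetch anything
# that is not yet in the store, then serialise it again. ``config`` and the file
# path below are assumptions for illustration, not defined in this module.
#
#     from zeroinstall.injector import qdom
#
#     with open("app.selections.xml", "rb") as stream:
#         sels = Selections(qdom.parse(stream))
#     blocker = sels.download_missing(config)    # None when everything is cached
#     xml = sels.toDOM().toxml("utf-8")          # back to a selections document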
class Selection(object):
"""A single selected implementation in a L{Selections} set.
@ivar dependencies: list of dependencies
@type dependencies: [L{model.Dependency}]
@ivar attrs: XML attributes map (name is in the format "{namespace} {localName}")
@type attrs: {str: str}
@ivar version: the implementation's version number
@type version: str"""
interface = property(lambda self: self.attrs['interface'])
id = property(lambda self: self.attrs['id'])
version = property(lambda self: self.attrs['version'])
feed = property(lambda self: self.attrs.get('from-feed', self.interface))
main = property(lambda self: self.attrs.get('main', None))
@property
def local_path(self):
local_path = self.attrs.get('local-path', None)
if local_path:
return local_path
if self.id.startswith('/'):
return self.id
return None
def __repr__(self):
return self.id
def is_available(self, stores):
"""Is this implementation available locally?
(a local implementation or a cached ZeroInstallImplementation)
@rtype: bool
@since: 0.53"""
path = self.local_path
if path is not None:
return os.path.exists(path)
path = stores.lookup_maybe(self.digests)
return path is not None
class ImplSelection(Selection):
"""A Selection created from an Implementation"""
__slots__ = ['impl', 'dependencies', 'attrs']
def __init__(self, iface_uri, impl, dependencies):
assert impl
self.impl = impl
self.dependencies = dependencies
attrs = impl.metadata.copy()
attrs['id'] = impl.id
attrs['version'] = impl.get_version()
attrs['interface'] = iface_uri
attrs['from-feed'] = impl.feed.url
if impl.local_path:
attrs['local-path'] = impl.local_path
self.attrs = attrs
@property
def bindings(self): return self.impl.bindings
@property
def digests(self): return self.impl.digests
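# ImplSelection above wraps a live Implementation chosen by the solver and
# borrows its bindings and digests directly from it; XMLSelection below is
# rebuilt purely from a saved document, so it carries those as parsed attributes.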
class XMLSelection(Selection):
"""A Selection created by reading an XML selections document.
@ivar digests: a list of manifest digests
@type digests: [str]
"""
__slots__ = ['bindings', 'dependencies', 'attrs', 'digests']
def __init__(self, dependencies, bindings = None, attrs = None, digests = None):
if bindings is None: bindings = []
if digests is None: digests = []
self.dependencies = dependencies
self.bindings = bindings
self.attrs = attrs
self.digests = digests
assert self.interface
assert self.id
assert self.version
assert self.feed
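# For orientation, a hand-written, abridged sketch (not taken from a real feed)
# of the kind of <selection> element that Selections._init_from_qdom() parses:
#
#     <selection interface="http://example.com/prog.xml"
#                id="sha1new=..." version="1.0">
#       <manifest-digest sha256="..."/>
#       <requires interface="http://example.com/lib.xml">
#         <environment name="LIBPATH" insert="lib"/>
#       </requires>
#     </selection>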
class Selections(object):
"""
A selected set of components which will make up a complete program.
@ivar interface: the interface of the program
@type interface: str
@ivar commands: how to run this selection (will contain more than one item if runners are used)
@type commands: [L{Command}]
@ivar selections: the selected implementations
@type selections: {str: L{Selection}}
"""
__slots__ = ['interface', 'selections', 'commands']
def __init__(self, source):
"""Constructor.
@param source: a map of implementations, policy or selections document
@type source: {str: L{Selection}} | L{Policy} | L{Element}
"""
self.selections = {}
if source is None:
self.commands = []
# (Solver will fill everything in)
elif isinstance(source, Policy):
self._init_from_policy(source)
elif isinstance(source, Element):
self._init_from_qdom(source)
else:
raise Exception(_("Source not a Policy or qdom.Element!"))
def _init_from_policy(self, policy):
"""Set the selections from a policy.
@deprecated: use Solver.selections instead
@param policy: the policy giving the selected implementations."""
self.interface = policy.root
self.selections = policy.solver.selections.selections
self.commands = policy.solver.selections.commands
def _init_from_qdom(self, root):
"""Parse and load a selections document.
@param root: a saved set of selections."""
self.interface = root.getAttribute('interface')
assert self.interface
self.commands = []
for selection in root.childNodes:
if selection.uri != XMLNS_IFACE:
continue
if selection.name != 'selection':
if selection.name == 'command':
self.commands.append(Command(selection, None))
continue
requires = []
bindings = []
digests = []
for dep_elem in selection.childNodes:
if dep_elem.uri != XMLNS_IFACE:
continue
if dep_elem.name in binding_names:
bindings.append(process_binding(dep_elem))
elif dep_elem.name == 'requires':
dep = process_depends(dep_elem, None)
requires.append(dep)
elif dep_elem.name == 'manifest-digest':
for aname, avalue in dep_elem.attrs.iteritems():
digests.append('%s=%s' % (aname, avalue))
# For backwards compatibility, allow getting the digest from the ID
sel_id = selection.attrs['id']
local_path = selection.attrs.get("local-path", None)
if (not digests and not local_path) and '=' in sel_id:
alg = sel_id.split('=', 1)[0]
if alg in ('sha1', 'sha1new', 'sha256'):
digests.append(sel_id)
iface_uri = selection.attrs['interface']
s = XMLSelection(requires, bindings, selection.attrs, digests)
self.selections[iface_uri] = s
if not self.commands:
# Old-style selections document; use the main attribute
if iface_uri == self.interface:
root_sel = self.selections[self.interface]
main = root_sel.attrs.get('main', None)
if main is not None:
self.commands = [Command(Element(XMLNS_IFACE, 'command', {'path': main}), None)]
def toDOM(self):
"""Create a DOM document for the selected implementations.
The document gives the URI of the root, plus each selected implementation.
For each selected implementation, we record the ID, the version, the URI and
(if different) the feed URL. We also record all the bindings needed.
@return: a new DOM Document"""
from xml.dom import minidom, XMLNS_NAMESPACE
assert self.interface
impl = minidom.getDOMImplementation()
doc = impl.createDocument(XMLNS_IFACE, "selections", None)
root = doc.documentElement
root.setAttributeNS(XMLNS_NAMESPACE, 'xmlns', XMLNS_IFACE)
root.setAttributeNS(None, 'interface', self.interface)
prefixes = Prefixes(XMLNS_IFACE)
for iface, selection in sorted(self.selections.items()):
selection_elem = doc.createElementNS(XMLNS_IFACE, 'selection')
selection_elem.setAttributeNS(None, 'interface', selection.interface)
root.appendChild(selection_elem)
for name, value in selection.attrs.iteritems():
if ' ' in name:
ns, localName = name.split(' ', 1)
prefixes.setAttributeNS(selection_elem, ns, localName, value)
elif name == 'stability':
pass
elif name == 'from-feed':
# Don't bother writing from-feed attr if it's the same as the interface
if value != selection.attrs['interface']:
selection_elem.setAttributeNS(None, name, value)
elif name not in ('main', 'self-test'): # (replaced by <command>)
selection_elem.setAttributeNS(None, name, value)
if selection.digests:
manifest_digest = doc.createElementNS(XMLNS_IFACE, 'manifest-digest')
for digest in selection.digests:
aname, avalue = digest.split('=', 1)
assert ':' not in aname
manifest_digest.setAttribute(aname, avalue)
selection_elem.appendChild(manifest_digest)
for b in selection.bindings:
selection_elem.appendChild(b._toxml(doc))
for dep in selection.dependencies:
dep_elem = doc.createElementNS(XMLNS_IFACE, 'requires')
dep_elem.setAttributeNS(None, 'interface', dep.interface)
selection_elem.appendChild(dep_elem)
for m in dep.metadata:
parts = m.split(' ', 1)
if len(parts) == 1:
ns = None
localName = parts[0]
dep_elem.setAttributeNS(None, localName, dep.metadata[m])
else:
ns, localName = parts
prefixes.setAttributeNS(dep_elem, ns, localName, dep.metadata[m])
for b in dep.bindings:
dep_elem.appendChild(b._toxml(doc))
for command in self.commands:
root.appendChild(command._toxml(doc, prefixes))
for ns, prefix in prefixes.prefixes.items():
root.setAttributeNS(XMLNS_NAMESPACE, 'xmlns:' + prefix, ns)
return doc
def __repr__(self):
return "Selections for " + self.interface
def download_missing(self, config, _old = None, include_packages = False):
"""Check all selected implementations are available.
Download any that are not present.
Note: package implementations (distribution packages) are ignored unless include_packages is True.
@param config: used to get iface_cache, stores and fetcher
@param include_packages: also install native distribution packages
@return: a L{tasks.Blocker} or None"""
from zeroinstall.zerostore import NotStored
if _old:
config = get_deprecated_singleton_config()
iface_cache = config.iface_cache
stores = config.stores
# Check that every required selection is cached
def needs_download(sel):
if sel.id.startswith('package:'):
return include_packages
elif not sel.local_path:
try:
stores.lookup_any(sel.digests)
except NotStored:
return True
return False
needed_downloads = list(filter(needs_download, self.selections.values()))
if not needed_downloads:
return
if config.network_use == model.network_offline:
from zeroinstall import NeedDownload
raise NeedDownload(', '.join([str(x) for x in needed_downloads]))
@tasks.async
def download():
# We're missing some. For each one, get the feed it came from
# and find the corresponding <implementation> in that. This will
# tell us where to get it from.
# Note: we look for an implementation with the same ID. Maybe we
# should check it has the same digest(s) too?
needed_impls = []
for sel in needed_downloads:
feed_url = sel.attrs.get('from-feed', None) or sel.attrs['interface']
feed = iface_cache.get_feed(feed_url)
if feed is None or sel.id not in feed.implementations:
fetch_feed = config.fetcher.download_and_import_feed(feed_url, iface_cache)
yield fetch_feed
tasks.check(fetch_feed)
feed = iface_cache.get_feed(feed_url)
assert feed, "Failed to get feed for %s" % feed_url
impl = feed.implementations[sel.id]
needed_impls.append(impl)
fetch_impls = config.fetcher.download_impls(needed_impls, stores)
yield fetch_impls
tasks.check(fetch_impls)
return download()
# These (deprecated) methods are to make a Selections object look like the old Policy.implementation map...
def __getitem__(self, key):
# Deprecated
if isinstance(key, basestring):
return self.selections[key]
sel = self.selections[key.uri]
return sel and sel.impl
def iteritems(self):
# Deprecated
iface_cache = get_deprecated_singleton_config().iface_cache
for (uri, sel) in self.selections.iteritems():
yield (iface_cache.get_interface(uri), sel and sel.impl)
def values(self):
# Deprecated
for (uri, sel) in self.selections.iteritems():
yield sel and sel.impl
def __iter__(self):
# Deprecated
iface_cache = get_deprecated_singleton_config().iface_cache
for (uri, sel) in self.selections.iteritems():
yield iface_cache.get_interface(uri)
def get(self, iface, if_missing):
# Deprecated
sel = self.selections.get(iface.uri, None)
if sel:
return sel.impl
return if_missing
def copy(self):
# Deprecated
s = Selections(None)
s.interface = self.interface
s.selections = self.selections.copy()
return s
def items(self):
# Deprecated
return list(self.iteritems())
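    # Illustrative sketch (not part of the original class): read-only use of a
    # parsed Selections object. "doc_root" is an assumed qdom Element obtained
    # elsewhere, e.g. by parsing a saved selections document.
    #
    #   sels = Selections(doc_root)
    #   for iface_uri, sel in sels.selections.iteritems():
    #       print iface_uri, sel.attrs.get('version'), sel.digests
    #   xml_doc = sels.toDOM()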
| timdiels/zeroinstall | zeroinstall/injector/selections.py | Python | lgpl-2.1 | 11,955 |
import time
import sys
import re
ignore_regex = (
#Twitter Usernames
r"""(@[\w]+)"""
,
#Twitter Hashtags
r"""(#[\w]+)"""
,
#URLs
r"""(http[s]?://[\w_./]+)"""
,
#HTML Entities
r"""(&[a-z]+;)"""
#,
#Non-Alphabet Word
#r"""([^a-z^A-Z]+)"""
)
stop_list = [w.strip() for w in open("data/stop_words.txt","rb").readlines()]
def tokenize(text):
for i in ignore_regex:
text = re.sub(i, ' ', text)
    # Split on whitespace and punctuation; apostrophes, hyphens and underscores are not split on, so they stay inside tokens
tokens = re.split("[\s,.?!:)({}\"=*\[\]|;^<>~]+", text)
filtered_tokens = set()
for t in tokens:
        # Keep only lowercase alphabetic words (apostrophes, hyphens and underscores allowed inside) of at least two characters that are not stop words
if re.match("(^[a-z][a-z'\-_]*[a-z]$)",t) is not None and t not in stop_list:
filtered_tokens.add(t.lower())
return filtered_tokens
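# Illustrative example (not part of the original module): the kind of result
# tokenize() produces. The exact output depends on data/stop_words.txt, which
# is an external assumption; here "the" is assumed to be a stop word.
#
#   >>> tokenize("@user check http://t.co/abc loving the new-release!!")
#   set(['check', 'loving', 'new-release'])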
if __name__ == "__main__":
for l in open(sys.argv[1],"rb").readlines():
wl = tokenize(l)
print " ".join(wl)
| nkoilada/twitter_sentiment | tokens.py | Python | lgpl-2.1 | 915 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import gettext
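# Note (an assumption inferred from the parsing code below, not part of the
# original script): the three mapping files read from <prefix> are expected to
# hold one colon-separated record per line, for example:
#
#   user_in_groups_map:   alice : physics, chemistry
#   groups_map:           physics : <anything>
#   users_map:            alice : <anything>
#
# Only the name before the first ':' is used from groups_map and users_map;
# user_in_groups_map also uses the comma-separated group list after it.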
def create_users_and_groups(args):
if os.path.exists(args.homes_prefix):
if not os.path.isdir(args.homes_prefix):
print _("path '%s' is not a directory") % args.homes_prefix
return 3
else:
try:
print _("make path to homes: '%s'") % args.homes_prefix
os.makedirs(args.homes_prefix,0755)
except OSError, e:
print e
return 3
user_group_map=dict()
file_descr=open(args.prefix+"user_in_groups_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
groups_line=tupl[1].strip()
groups_tupl=groups_line.split(',')
groups=list()
for group in groups_tupl:
groups.append(group.strip(' \t\n\r'))
user_group_map[user]=groups
file_descr.close()
file_descr=open(args.prefix+"groups_map","r")
for line in file_descr:
tupl=line.split(':')
group=tupl[0].strip()
command_line = "groupadd --force '%s'" % group
print _("create group: '%s'") % group
if os.system(command_line):
return 1
#print command_line
file_descr.close()
file_descr=open(args.prefix+"users_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
command_line="useradd "
command_line+="--create-home --home \'%s/%s\' " % (args.homes_prefix, user)
command_line+="--gid '%s' " % user_group_map[user][0]
groups_line=""
for i in xrange (1, len(user_group_map[user])):
groups_line+="%s," % user_group_map[user][i]
        groups_line=groups_line.strip("\t\r ',")
        if groups_line != "":
            command_line+="--groups '%s'" % groups_line
command_line+=" '%s'" % user
print _("create user: '%s'") % user
if os.system(command_line):
return 1
#print command_line
file_descr.close()
def delete_users_and_groups(args):
file_descr=open(args.prefix+"users_map","r")
for line in file_descr:
tupl=line.split(':')
user=tupl[0].strip()
command_line="userdel '%s'" % user
print _("delete user: '%s'") % user
if os.system(command_line):
print _(" warning for user '%s'") % user
#print command_line
file_descr.close()
file_descr=open(args.prefix+"groups_map","r")
for line in file_descr:
tupl=line.split(':')
group=tupl[0].strip()
command_line = "groupdel '%s'" % group
print _("delete group: '%s'") % group
if os.system(command_line):
print _(" warning for group '%s'") % group
#print command_line
file_descr.close()
return 0
def main(argv=None):
"""
То, с чего начинается программа
"""
if argv == None:
argv=sys.argv
gettext.install('pseudo-cluster')
parser = argparse.ArgumentParser(
description=\
_("""
                     This script creates users and groups and also adds
                     the users to their groups. The users and groups are
                     taken from special mapping files produced by the
                     statistics parsing utilities,
                     for example the parse_slurm_db.py utility.
"""),
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            epilog=_("For example, you can run it like this:\n ")+argv[0]+" --prefix /tmp/cluster_name_"
)
parser.add_argument(
'--prefix',
dest='prefix',
required=True,
default="./",
help=_("префикс, по которому находятся файлы с отображениями пользователей")
)
parser.add_argument(
'--mode',
dest='mode',
required=False,
choices=["create","delete"],
default="create",
help=\
_("""
                 selects the operating mode:
                 create -- create the users and groups,
                 as well as the users' home
                 directories.
                 delete -- delete the users and groups;
                 the users' home directories are left unchanged.
""")
)
parser.add_argument(
'--homes-prefix',
dest='homes_prefix',
required=False,
default="/home/pseudo_cluster_users",
help=_("префикс, по которому находятся каталоги пользователей псевдокластера")
)
args=parser.parse_args()
if os.geteuid() != 0:
print _("""
        This program requires
        root (superuser) privileges.
        Run it as the root user
        or via the sudo command.
""")
return 2
if args.mode == "create":
return create_users_and_groups(args)
if args.mode == "delete":
return delete_users_and_groups(args)
return 100
if __name__ == "__main__":
sys.exit(main())
| pseudo-cluster/pseudo-cluster | scripts/pseudo_users_and_groups_operations.py | Python | lgpl-2.1 | 5,936 |
#!/usr/bin/env python
'''
mavlink python utility functions
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
import socket, math, struct, time, os, fnmatch, array, sys, errno
import select
# adding these extra imports allows pymavlink to be used directly with pyinstaller
# without having complex spec files. To allow for installs that don't have ardupilotmega
# at all we avoid throwing an exception if it isn't installed
try:
import json
from pymavlink.dialects.v10 import ardupilotmega
except Exception:
pass
# maximum packet length for a single receive call - use the UDP limit
UDP_MAX_PACKET_LEN = 65535
'''
Support having a $HOME/.pymavlink/mavextra.py for extra graphing functions
'''
home = os.getenv('HOME')
if home is not None:
extra = os.path.join(home, '.pymavlink', 'mavextra.py')
if os.path.exists(extra):
import imp
mavuser = imp.load_source('pymavlink.mavuser', extra)
from pymavlink.mavuser import *
# Store the MAVLink library for the currently-selected dialect
# (set by set_dialect())
mavlink = None
# Store the mavlink file currently being operated on
# (set by mavlink_connection())
mavfile_global = None
# If the caller hasn't specified a particular native/legacy version, use this
default_native = False
# Use a globally-set MAVLink dialect if one has been specified via the MAVLINK_DIALECT environment variable; otherwise default to ardupilotmega.
if not 'MAVLINK_DIALECT' in os.environ:
os.environ['MAVLINK_DIALECT'] = 'ardupilotmega'
def mavlink10():
'''return True if using MAVLink 1.0'''
return not 'MAVLINK09' in os.environ
def evaluate_expression(expression, vars):
    '''evaluate an expression'''
try:
v = eval(expression, globals(), vars)
except NameError:
return None
except ZeroDivisionError:
return None
return v
def evaluate_condition(condition, vars):
    '''evaluate a conditional (boolean) statement'''
if condition is None:
return True
v = evaluate_expression(condition, vars)
if v is None:
return False
return v
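# Illustrative sketch (not part of the original module): condition strings are
# evaluated against a dictionary of the most recently seen messages, so with an
# assumed mavfile instance "m" a typical call looks like
#
#   evaluate_condition('GPS_RAW_INT.fix_type>=3', m.messages)
#
# which returns True once a 3D fix has been reported and False otherwise
# (including while no GPS_RAW_INT message has been seen yet).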
class location(object):
'''represent a GPS coordinate'''
def __init__(self, lat, lng, alt=0, heading=0):
self.lat = lat
self.lng = lng
self.alt = alt
self.heading = heading
def __str__(self):
return "lat=%.6f,lon=%.6f,alt=%.1f" % (self.lat, self.lng, self.alt)
def set_dialect(dialect):
'''set the MAVLink dialect to work with.
For example, set_dialect("ardupilotmega")
'''
global mavlink, current_dialect
from .generator import mavparse
if mavlink is None or mavlink.WIRE_PROTOCOL_VERSION == "1.0" or not 'MAVLINK09' in os.environ:
wire_protocol = mavparse.PROTOCOL_1_0
modname = "pymavlink.dialects.v10." + dialect
else:
wire_protocol = mavparse.PROTOCOL_0_9
modname = "pymavlink.dialects.v09." + dialect
try:
mod = __import__(modname)
except Exception:
# auto-generate the dialect module
from .generator.mavgen import mavgen_python_dialect
mavgen_python_dialect(dialect, wire_protocol)
mod = __import__(modname)
components = modname.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
current_dialect = dialect
mavlink = mod
# Set the default dialect. This is done here as it needs to be after the function declaration
set_dialect(os.environ['MAVLINK_DIALECT'])
class mavfile(object):
'''a generic mavlink port'''
def __init__(self, fd, address, source_system=255, notimestamps=False, input=True, use_native=default_native):
global mavfile_global
if input:
mavfile_global = self
self.fd = fd
self.address = address
self.messages = { 'MAV' : self }
if mavlink.WIRE_PROTOCOL_VERSION == "1.0":
self.messages['HOME'] = mavlink.MAVLink_gps_raw_int_message(0,0,0,0,0,0,0,0,0,0)
mavlink.MAVLink_waypoint_message = mavlink.MAVLink_mission_item_message
else:
self.messages['HOME'] = mavlink.MAVLink_gps_raw_message(0,0,0,0,0,0,0,0,0)
self.params = {}
self.target_system = 0
self.target_component = 0
self.source_system = source_system
self.first_byte = True
self.robust_parsing = True
self.mav = mavlink.MAVLink(self, srcSystem=self.source_system, use_native=use_native)
self.mav.robust_parsing = self.robust_parsing
self.logfile = None
self.logfile_raw = None
self.param_fetch_in_progress = False
self.param_fetch_complete = False
self.start_time = time.time()
self.flightmode = "UNKNOWN"
self.vehicle_type = "UNKNOWN"
self.mav_type = mavlink.MAV_TYPE_FIXED_WING
self.base_mode = 0
self.timestamp = 0
self.message_hooks = []
self.idle_hooks = []
self.uptime = 0.0
self.notimestamps = notimestamps
self._timestamp = None
self.ground_pressure = None
self.ground_temperature = None
self.altitude = 0
self.WIRE_PROTOCOL_VERSION = mavlink.WIRE_PROTOCOL_VERSION
self.last_seq = {}
self.mav_loss = 0
self.mav_count = 0
self.stop_on_EOF = False
self.portdead = False
def auto_mavlink_version(self, buf):
'''auto-switch mavlink protocol version'''
global mavlink
if len(buf) == 0:
return
try:
magic = ord(buf[0])
except:
magic = buf[0]
if not magic in [ 85, 254 ]:
return
self.first_byte = False
if self.WIRE_PROTOCOL_VERSION == "0.9" and magic == 254:
self.WIRE_PROTOCOL_VERSION = "1.0"
set_dialect(current_dialect)
elif self.WIRE_PROTOCOL_VERSION == "1.0" and magic == 85:
self.WIRE_PROTOCOL_VERSION = "0.9"
set_dialect(current_dialect)
os.environ['MAVLINK09'] = '1'
else:
return
# switch protocol
(callback, callback_args, callback_kwargs) = (self.mav.callback,
self.mav.callback_args,
self.mav.callback_kwargs)
self.mav = mavlink.MAVLink(self, srcSystem=self.source_system)
self.mav.robust_parsing = self.robust_parsing
self.WIRE_PROTOCOL_VERSION = mavlink.WIRE_PROTOCOL_VERSION
(self.mav.callback, self.mav.callback_args, self.mav.callback_kwargs) = (callback,
callback_args,
callback_kwargs)
def recv(self, n=None):
'''default recv method'''
raise RuntimeError('no recv() method supplied')
def close(self, n=None):
'''default close method'''
raise RuntimeError('no close() method supplied')
def write(self, buf):
'''default write method'''
raise RuntimeError('no write() method supplied')
def select(self, timeout):
'''wait for up to timeout seconds for more data'''
if self.fd is None:
time.sleep(min(timeout,0.5))
return True
try:
(rin, win, xin) = select.select([self.fd], [], [], timeout)
except select.error:
return False
return len(rin) == 1
def pre_message(self):
'''default pre message call'''
return
def set_rtscts(self, enable):
'''enable/disable RTS/CTS if applicable'''
return
def post_message(self, msg):
'''default post message call'''
if '_posted' in msg.__dict__:
return
msg._posted = True
msg._timestamp = time.time()
type = msg.get_type()
if type != 'HEARTBEAT' or (msg.type != mavlink.MAV_TYPE_GCS and msg.type != mavlink.MAV_TYPE_GIMBAL):
self.messages[type] = msg
if 'usec' in msg.__dict__:
self.uptime = msg.usec * 1.0e-6
if 'time_boot_ms' in msg.__dict__:
self.uptime = msg.time_boot_ms * 1.0e-3
if self._timestamp is not None:
if self.notimestamps:
msg._timestamp = self.uptime
else:
msg._timestamp = self._timestamp
src_system = msg.get_srcSystem()
src_component = msg.get_srcComponent()
src_tuple = (src_system, src_component)
radio_tuple = (ord('3'), ord('D'))
if not (src_tuple == radio_tuple or msg.get_type() == 'BAD_DATA'):
if not src_tuple in self.last_seq:
last_seq = -1
else:
last_seq = self.last_seq[src_tuple]
seq = (last_seq+1) % 256
seq2 = msg.get_seq()
if seq != seq2 and last_seq != -1:
diff = (seq2 - seq) % 256
self.mav_loss += diff
#print("lost %u seq=%u seq2=%u last_seq=%u src_system=%u %s" % (diff, seq, seq2, last_seq, src_system, msg.get_type()))
self.last_seq[src_tuple] = seq2
self.mav_count += 1
self.timestamp = msg._timestamp
if type == 'HEARTBEAT' and msg.get_srcComponent() != mavlink.MAV_COMP_ID_GIMBAL:
self.target_system = msg.get_srcSystem()
self.target_component = msg.get_srcComponent()
if mavlink.WIRE_PROTOCOL_VERSION == '1.0' and msg.type != mavlink.MAV_TYPE_GCS:
self.flightmode = mode_string_v10(msg)
self.mav_type = msg.type
self.base_mode = msg.base_mode
elif type == 'PARAM_VALUE':
s = str(msg.param_id)
self.params[str(msg.param_id)] = msg.param_value
if msg.param_index+1 == msg.param_count:
self.param_fetch_in_progress = False
self.param_fetch_complete = True
elif type == 'SYS_STATUS' and mavlink.WIRE_PROTOCOL_VERSION == '0.9':
self.flightmode = mode_string_v09(msg)
elif type == 'GPS_RAW':
if self.messages['HOME'].fix_type < 2:
self.messages['HOME'] = msg
elif type == 'GPS_RAW_INT':
if self.messages['HOME'].fix_type < 3:
self.messages['HOME'] = msg
for hook in self.message_hooks:
hook(self, msg)
def packet_loss(self):
'''packet loss as a percentage'''
if self.mav_count == 0:
return 0
return (100.0*self.mav_loss)/(self.mav_count+self.mav_loss)
def recv_msg(self):
'''message receive routine'''
self.pre_message()
while True:
n = self.mav.bytes_needed()
s = self.recv(n)
numnew = len(s)
if numnew != 0:
if self.logfile_raw:
self.logfile_raw.write(str(s))
if self.first_byte:
self.auto_mavlink_version(s)
# We always call parse_char even if the new string is empty, because the existing message buf might already have some valid packet
# we can extract
msg = self.mav.parse_char(s)
if msg:
if self.logfile and msg.get_type() != 'BAD_DATA' :
usec = int(time.time() * 1.0e6) & ~3
self.logfile.write(str(struct.pack('>Q', usec) + msg.get_msgbuf()))
self.post_message(msg)
return msg
else:
# if we failed to parse any messages _and_ no new bytes arrived, return immediately so the client has the option to
# timeout
if numnew == 0:
return None
def recv_match(self, condition=None, type=None, blocking=False, timeout=None):
'''recv the next MAVLink message that matches the given condition
type can be a string or a list of strings'''
if type is not None and not isinstance(type, list):
type = [type]
start_time = time.time()
while True:
if timeout is not None:
now = time.time()
if now < start_time:
start_time = now # If an external process rolls back system time, we should not spin forever.
if start_time + timeout < time.time():
return None
m = self.recv_msg()
if m is None:
if blocking:
for hook in self.idle_hooks:
hook(self)
if timeout is None:
self.select(0.05)
else:
self.select(timeout/2)
continue
return None
if type is not None and not m.get_type() in type:
continue
if not evaluate_condition(condition, self.messages):
continue
return m
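    # Illustrative sketch (not part of the original class): typical recv_match()
    # usage; "master" is an assumed mavfile instance, e.g. one returned by
    # mavlink_connection().
    #
    #   hb = master.recv_match(type='HEARTBEAT', blocking=True, timeout=5)
    #   if hb is None:
    #       print("no heartbeat within 5 seconds")
    #   att = master.recv_match(type=['ATTITUDE', 'VFR_HUD'],
    #                           condition='ATTITUDE.roll > 0.1', blocking=True)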
def check_condition(self, condition):
'''check if a condition is true'''
return evaluate_condition(condition, self.messages)
def mavlink10(self):
'''return True if using MAVLink 1.0'''
return self.WIRE_PROTOCOL_VERSION == "1.0"
def setup_logfile(self, logfile, mode='w'):
'''start logging to the given logfile, with timestamps'''
self.logfile = open(logfile, mode=mode)
def setup_logfile_raw(self, logfile, mode='w'):
'''start logging raw bytes to the given logfile, without timestamps'''
self.logfile_raw = open(logfile, mode=mode)
def wait_heartbeat(self, blocking=True):
'''wait for a heartbeat so we know the target system IDs'''
return self.recv_match(type='HEARTBEAT', blocking=blocking)
def param_fetch_all(self):
'''initiate fetch of all parameters'''
if time.time() - getattr(self, 'param_fetch_start', 0) < 2.0:
# don't fetch too often
return
self.param_fetch_start = time.time()
self.param_fetch_in_progress = True
self.mav.param_request_list_send(self.target_system, self.target_component)
def param_fetch_one(self, name):
'''initiate fetch of one parameter'''
try:
idx = int(name)
self.mav.param_request_read_send(self.target_system, self.target_component, "", idx)
except Exception:
self.mav.param_request_read_send(self.target_system, self.target_component, name, -1)
def time_since(self, mtype):
'''return the time since the last message of type mtype was received'''
if not mtype in self.messages:
return time.time() - self.start_time
return time.time() - self.messages[mtype]._timestamp
def param_set_send(self, parm_name, parm_value, parm_type=None):
'''wrapper for parameter set'''
if self.mavlink10():
if parm_type == None:
parm_type = mavlink.MAVLINK_TYPE_FLOAT
self.mav.param_set_send(self.target_system, self.target_component,
parm_name, parm_value, parm_type)
else:
self.mav.param_set_send(self.target_system, self.target_component,
parm_name, parm_value)
def waypoint_request_list_send(self):
'''wrapper for waypoint_request_list_send'''
if self.mavlink10():
self.mav.mission_request_list_send(self.target_system, self.target_component)
else:
self.mav.waypoint_request_list_send(self.target_system, self.target_component)
def waypoint_clear_all_send(self):
'''wrapper for waypoint_clear_all_send'''
if self.mavlink10():
self.mav.mission_clear_all_send(self.target_system, self.target_component)
else:
self.mav.waypoint_clear_all_send(self.target_system, self.target_component)
def waypoint_request_send(self, seq):
'''wrapper for waypoint_request_send'''
if self.mavlink10():
self.mav.mission_request_send(self.target_system, self.target_component, seq)
else:
self.mav.waypoint_request_send(self.target_system, self.target_component, seq)
def waypoint_set_current_send(self, seq):
'''wrapper for waypoint_set_current_send'''
if self.mavlink10():
self.mav.mission_set_current_send(self.target_system, self.target_component, seq)
else:
self.mav.waypoint_set_current_send(self.target_system, self.target_component, seq)
def waypoint_current(self):
'''return current waypoint'''
if self.mavlink10():
m = self.recv_match(type='MISSION_CURRENT', blocking=True)
else:
m = self.recv_match(type='WAYPOINT_CURRENT', blocking=True)
return m.seq
def waypoint_count_send(self, seq):
'''wrapper for waypoint_count_send'''
if self.mavlink10():
self.mav.mission_count_send(self.target_system, self.target_component, seq)
else:
self.mav.waypoint_count_send(self.target_system, self.target_component, seq)
def set_mode_flag(self, flag, enable):
'''
        Enables/disables MAV_MODE_FLAG
@param flag The mode flag,
see MAV_MODE_FLAG enum
@param enable Enable the flag, (True/False)
'''
if self.mavlink10():
mode = self.base_mode
if (enable == True):
mode = mode | flag
elif (enable == False):
mode = mode & ~flag
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_DO_SET_MODE, 0,
mode,
0, 0, 0, 0, 0, 0)
else:
print("Set mode flag not supported")
def set_mode_auto(self):
'''enter auto mode'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_MISSION_START, 0, 0, 0, 0, 0, 0, 0, 0)
else:
MAV_ACTION_SET_AUTO = 13
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_SET_AUTO)
def mode_mapping(self):
'''return dictionary mapping mode names to numbers, or None if unknown'''
mav_type = self.field('HEARTBEAT', 'type', self.mav_type)
if mav_type is None:
return None
map = None
if mav_type in [mavlink.MAV_TYPE_QUADROTOR,
mavlink.MAV_TYPE_HELICOPTER,
mavlink.MAV_TYPE_HEXAROTOR,
mavlink.MAV_TYPE_OCTOROTOR,
mavlink.MAV_TYPE_TRICOPTER]:
map = mode_mapping_acm
if mav_type == mavlink.MAV_TYPE_FIXED_WING:
map = mode_mapping_apm
if mav_type == mavlink.MAV_TYPE_GROUND_ROVER:
map = mode_mapping_rover
if mav_type == mavlink.MAV_TYPE_ANTENNA_TRACKER:
map = mode_mapping_tracker
if map is None:
return None
inv_map = dict((a, b) for (b, a) in list(map.items()))
return inv_map
def set_mode(self, mode):
'''enter arbitrary mode'''
if isinstance(mode, str):
map = self.mode_mapping()
if map is None or mode not in map:
print("Unknown mode '%s'" % mode)
return
mode = map[mode]
self.mav.set_mode_send(self.target_system,
mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,
mode)
def set_mode_rtl(self):
'''enter RTL mode'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH, 0, 0, 0, 0, 0, 0, 0, 0)
else:
MAV_ACTION_RETURN = 3
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_RETURN)
def set_mode_manual(self):
'''enter MANUAL mode'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_DO_SET_MODE, 0,
mavlink.MAV_MODE_MANUAL_ARMED,
0, 0, 0, 0, 0, 0)
else:
MAV_ACTION_SET_MANUAL = 12
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_SET_MANUAL)
def set_mode_fbwa(self):
'''enter FBWA mode'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_DO_SET_MODE, 0,
mavlink.MAV_MODE_STABILIZE_ARMED,
0, 0, 0, 0, 0, 0)
else:
print("Forcing FBWA not supported")
def set_mode_loiter(self):
'''enter LOITER mode'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_NAV_LOITER_UNLIM, 0, 0, 0, 0, 0, 0, 0, 0)
else:
MAV_ACTION_LOITER = 27
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_LOITER)
def set_servo(self, channel, pwm):
'''set a servo value'''
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_DO_SET_SERVO, 0,
channel, pwm,
0, 0, 0, 0, 0)
def set_relay(self, relay_pin=0, state=True):
'''Set relay_pin to value of state'''
if self.mavlink10():
self.mav.command_long_send(
self.target_system, # target_system
self.target_component, # target_component
mavlink.MAV_CMD_DO_SET_RELAY, # command
0, # Confirmation
relay_pin, # Relay Number
int(state), # state (1 to indicate arm)
0, # param3 (all other params meaningless)
0, # param4
0, # param5
0, # param6
0) # param7
else:
print("Setting relays not supported.")
def calibrate_level(self):
'''calibrate accels (1D version)'''
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
1, 1, 0, 0, 0, 0, 0)
def calibrate_pressure(self):
'''calibrate pressure'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
0, 0, 1, 0, 0, 0, 0)
else:
MAV_ACTION_CALIBRATE_PRESSURE = 20
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_CALIBRATE_PRESSURE)
def reboot_autopilot(self, hold_in_bootloader=False):
'''reboot the autopilot'''
if self.mavlink10():
if hold_in_bootloader:
param1 = 3
else:
param1 = 1
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN, 0,
param1, 0, 0, 0, 0, 0, 0)
# send an old style reboot immediately afterwards in case it is an older firmware
# that doesn't understand the new convention
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN, 0,
1, 0, 0, 0, 0, 0, 0)
def wait_gps_fix(self):
self.recv_match(type='VFR_HUD', blocking=True)
if self.mavlink10():
self.recv_match(type='GPS_RAW_INT', blocking=True,
condition='GPS_RAW_INT.fix_type==3 and GPS_RAW_INT.lat != 0 and GPS_RAW_INT.alt != 0')
else:
self.recv_match(type='GPS_RAW', blocking=True,
condition='GPS_RAW.fix_type==2 and GPS_RAW.lat != 0 and GPS_RAW.alt != 0')
def location(self, relative_alt=False):
'''return current location'''
self.wait_gps_fix()
# wait for another VFR_HUD, to ensure we have correct altitude
self.recv_match(type='VFR_HUD', blocking=True)
self.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
if relative_alt:
alt = self.messages['GLOBAL_POSITION_INT'].relative_alt*0.001
else:
alt = self.messages['VFR_HUD'].alt
return location(self.messages['GPS_RAW_INT'].lat*1.0e-7,
self.messages['GPS_RAW_INT'].lon*1.0e-7,
alt,
self.messages['VFR_HUD'].heading)
def arducopter_arm(self):
'''arm motors (arducopter only)'''
if self.mavlink10():
self.mav.command_long_send(
self.target_system, # target_system
self.target_component,
mavlink.MAV_CMD_COMPONENT_ARM_DISARM, # command
0, # confirmation
1, # param1 (1 to indicate arm)
0, # param2 (all other params meaningless)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def arducopter_disarm(self):
        '''disarm motors (arducopter only)'''
if self.mavlink10():
self.mav.command_long_send(
self.target_system, # target_system
self.target_component,
mavlink.MAV_CMD_COMPONENT_ARM_DISARM, # command
0, # confirmation
0, # param1 (0 to indicate disarm)
0, # param2 (all other params meaningless)
0, # param3
0, # param4
0, # param5
0, # param6
0) # param7
def motors_armed(self):
'''return true if motors armed'''
if not 'HEARTBEAT' in self.messages:
return False
m = self.messages['HEARTBEAT']
return (m.base_mode & mavlink.MAV_MODE_FLAG_SAFETY_ARMED) != 0
def motors_armed_wait(self):
'''wait for motors to be armed'''
while True:
m = self.wait_heartbeat()
if self.motors_armed():
return
def motors_disarmed_wait(self):
'''wait for motors to be disarmed'''
while True:
m = self.wait_heartbeat()
if not self.motors_armed():
return
def field(self, type, field, default=None):
        '''convenience function for returning an arbitrary MAVLink
        field with a default'''
if not type in self.messages:
return default
return getattr(self.messages[type], field, default)
def param(self, name, default=None):
        '''convenience function for returning an arbitrary MAVLink
        parameter with a default'''
if not name in self.params:
return default
return self.params[name]
def set_close_on_exec(fd):
    '''set the close-on-exec flag on a file descriptor. Ignore exceptions'''
try:
import fcntl
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
except Exception:
pass
class mavserial(mavfile):
'''a serial mavlink port'''
def __init__(self, device, baud=115200, autoreconnect=False, source_system=255, use_native=default_native):
import serial
if ',' in device and not os.path.exists(device):
device, baud = device.split(',')
self.baud = baud
self.device = device
self.autoreconnect = autoreconnect
# we rather strangely set the baudrate initially to 1200, then change to the desired
# baudrate. This works around a kernel bug on some Linux kernels where the baudrate
# is not set correctly
self.port = serial.Serial(self.device, 1200, timeout=0,
dsrdtr=False, rtscts=False, xonxoff=False)
try:
fd = self.port.fileno()
set_close_on_exec(fd)
except Exception:
fd = None
self.set_baudrate(self.baud)
mavfile.__init__(self, fd, device, source_system=source_system, use_native=use_native)
self.rtscts = False
def set_rtscts(self, enable):
'''enable/disable RTS/CTS if applicable'''
self.port.setRtsCts(enable)
self.rtscts = enable
def set_baudrate(self, baudrate):
'''set baudrate'''
self.port.setBaudrate(baudrate)
def close(self):
self.port.close()
def recv(self,n=None):
if n is None:
n = self.mav.bytes_needed()
if self.fd is None:
waiting = self.port.inWaiting()
if waiting < n:
n = waiting
ret = self.port.read(n)
return ret
def write(self, buf):
try:
if not isinstance(buf, str):
buf = str(buf)
return self.port.write(buf)
except Exception:
if not self.portdead:
print("Device %s is dead" % self.device)
self.portdead = True
if self.autoreconnect:
self.reset()
return -1
def reset(self):
import serial
try:
newport = serial.Serial(self.device, self.baud, timeout=0,
dsrdtr=False, rtscts=False, xonxoff=False)
self.port.close()
self.port = newport
print("Device %s reopened OK" % self.device)
self.portdead = False
try:
self.fd = self.port.fileno()
except Exception:
self.fd = None
if self.rtscts:
self.set_rtscts(self.rtscts)
return True
except Exception:
return False
class mavudp(mavfile):
'''a UDP mavlink socket'''
def __init__(self, device, input=True, broadcast=False, source_system=255, use_native=default_native):
a = device.split(':')
if len(a) != 2:
print("UDP ports must be specified as host:port")
sys.exit(1)
self.port = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udp_server = input
if input:
self.port.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.port.bind((a[0], int(a[1])))
else:
self.destination_addr = (a[0], int(a[1]))
if broadcast:
self.port.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
set_close_on_exec(self.port.fileno())
self.port.setblocking(0)
self.last_address = None
mavfile.__init__(self, self.port.fileno(), device, source_system=source_system, input=input, use_native=use_native)
def close(self):
self.port.close()
def recv(self,n=None):
try:
data, self.last_address = self.port.recvfrom(UDP_MAX_PACKET_LEN)
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK, errno.ECONNREFUSED ]:
return ""
raise
return data
def write(self, buf):
try:
if self.udp_server:
if self.last_address:
self.port.sendto(buf, self.last_address)
else:
self.port.sendto(buf, self.destination_addr)
except socket.error:
pass
def recv_msg(self):
'''message receive routine for UDP link'''
self.pre_message()
s = self.recv()
if len(s) == 0:
return None
if self.first_byte:
self.auto_mavlink_version(s)
msg = self.mav.parse_buffer(s)
if msg is not None:
for m in msg:
self.post_message(m)
return msg[0]
return None
class mavtcp(mavfile):
'''a TCP mavlink socket'''
def __init__(self, device, source_system=255, retries=3, use_native=default_native):
a = device.split(':')
if len(a) != 2:
print("TCP ports must be specified as host:port")
sys.exit(1)
self.port = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.destination_addr = (a[0], int(a[1]))
while retries >= 0:
retries -= 1
if retries <= 0:
self.port.connect(self.destination_addr)
else:
try:
self.port.connect(self.destination_addr)
break
except Exception as e:
if retries > 0:
print(e, "sleeping")
time.sleep(1)
continue
self.port.setblocking(0)
set_close_on_exec(self.port.fileno())
self.port.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
mavfile.__init__(self, self.port.fileno(), "tcp:" + device, source_system=source_system, use_native=use_native)
def close(self):
self.port.close()
def recv(self,n=None):
if n is None:
n = self.mav.bytes_needed()
try:
data = self.port.recv(n)
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
return ""
raise
return data
def write(self, buf):
try:
self.port.send(buf)
except socket.error:
pass
class mavlogfile(mavfile):
'''a MAVLink logfile reader/writer'''
def __init__(self, filename, planner_format=None,
write=False, append=False,
robust_parsing=True, notimestamps=False, source_system=255, use_native=default_native):
self.filename = filename
self.writeable = write
self.robust_parsing = robust_parsing
self.planner_format = planner_format
self._two64 = math.pow(2.0, 63)
mode = 'rb'
if self.writeable:
if append:
mode = 'ab'
else:
mode = 'wb'
self.f = open(filename, mode)
self.filesize = os.path.getsize(filename)
self.percent = 0
mavfile.__init__(self, None, filename, source_system=source_system, notimestamps=notimestamps, use_native=use_native)
if self.notimestamps:
self._timestamp = 0
else:
self._timestamp = time.time()
self.stop_on_EOF = True
self._last_message = None
self._last_timestamp = None
def close(self):
self.f.close()
def recv(self,n=None):
if n is None:
n = self.mav.bytes_needed()
return self.f.read(n)
def write(self, buf):
self.f.write(buf)
def scan_timestamp(self, tbuf):
'''scan forward looking in a tlog for a timestamp in a reasonable range'''
while True:
(tusec,) = struct.unpack('>Q', tbuf)
t = tusec * 1.0e-6
if abs(t - self._last_timestamp) <= 3*24*60*60:
break
c = self.f.read(1)
if len(c) != 1:
break
tbuf = tbuf[1:] + c
return t
def pre_message(self):
'''read timestamp if needed'''
# read the timestamp
if self.filesize != 0:
self.percent = (100.0 * self.f.tell()) / self.filesize
if self.notimestamps:
return
if self.planner_format:
tbuf = self.f.read(21)
if len(tbuf) != 21 or tbuf[0] != '-' or tbuf[20] != ':':
raise RuntimeError('bad planner timestamp %s' % tbuf)
hnsec = self._two64 + float(tbuf[0:20])
t = hnsec * 1.0e-7 # convert to seconds
t -= 719163 * 24 * 60 * 60 # convert to 1970 base
self._link = 0
else:
tbuf = self.f.read(8)
if len(tbuf) != 8:
return
(tusec,) = struct.unpack('>Q', tbuf)
t = tusec * 1.0e-6
if (self._last_timestamp is not None and
self._last_message.get_type() == "BAD_DATA" and
abs(t - self._last_timestamp) > 3*24*60*60):
t = self.scan_timestamp(tbuf)
self._link = tusec & 0x3
self._timestamp = t
def post_message(self, msg):
'''add timestamp to message'''
        # the timestamp was read in pre_message(); the base class attaches it to the message
super(mavlogfile, self).post_message(msg)
if self.planner_format:
self.f.read(1) # trailing newline
self.timestamp = msg._timestamp
self._last_message = msg
if msg.get_type() != "BAD_DATA":
self._last_timestamp = msg._timestamp
class mavmemlog(mavfile):
'''a MAVLink log in memory. This allows loading a log into
memory to make it easier to do multiple sweeps over a log'''
def __init__(self, mav):
mavfile.__init__(self, None, 'memlog')
self._msgs = []
self._index = 0
self._count = 0
self.messages = {}
while True:
m = mav.recv_msg()
if m is None:
break
self._msgs.append(m)
self._count = len(self._msgs)
def recv_msg(self):
'''message receive routine'''
if self._index >= self._count:
return None
m = self._msgs[self._index]
self._index += 1
self.percent = (100.0 * self._index) / self._count
self.messages[m.get_type()] = m
return m
def rewind(self):
'''rewind to start'''
self._index = 0
self.percent = 0
self.messages = {}
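# Illustrative helper (not part of the original module): shows the intended
# mavmemlog workflow of loading a log once and sweeping it several times.
# The log file name is an assumption supplied by the caller.
def _example_memlog_two_passes(logfile_name):
    '''sketch: count all messages, rewind, then count HEARTBEATs'''
    mlog = mavmemlog(mavlink_connection(logfile_name))
    total = 0
    while mlog.recv_msg() is not None:
        total += 1
    mlog.rewind()
    heartbeats = 0
    while True:
        m = mlog.recv_msg()
        if m is None:
            break
        if m.get_type() == 'HEARTBEAT':
            heartbeats += 1
    return (total, heartbeats)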
class mavchildexec(mavfile):
    '''a MAVLink child process reader/writer'''
def __init__(self, filename, source_system=255, use_native=default_native):
from subprocess import Popen, PIPE
import fcntl
self.filename = filename
self.child = Popen(filename, shell=False, stdout=PIPE, stdin=PIPE, bufsize=0)
self.fd = self.child.stdout.fileno()
fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
fl = fcntl.fcntl(self.child.stdout.fileno(), fcntl.F_GETFL)
fcntl.fcntl(self.child.stdout.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
mavfile.__init__(self, self.fd, filename, source_system=source_system, use_native=use_native)
def close(self):
        # subprocess.Popen has no close() method; terminate the child instead
        self.child.terminate()
def recv(self,n=None):
try:
x = self.child.stdout.read(1)
except Exception:
return ''
return x
def write(self, buf):
self.child.stdin.write(buf)
def mavlink_connection(device, baud=115200, source_system=255,
planner_format=None, write=False, append=False,
robust_parsing=True, notimestamps=False, input=True,
dialect=None, autoreconnect=False, zero_time_base=False,
retries=3, use_native=default_native):
'''open a serial, UDP, TCP or file mavlink connection'''
global mavfile_global
if dialect is not None:
set_dialect(dialect)
if device.startswith('tcp:'):
return mavtcp(device[4:], source_system=source_system, retries=retries, use_native=use_native)
if device.startswith('udpin:'):
return mavudp(device[6:], input=True, source_system=source_system, use_native=use_native)
if device.startswith('udpout:'):
return mavudp(device[7:], input=False, source_system=source_system, use_native=use_native)
    # For legacy purposes we accept the following syntax and let the caller specify the direction
if device.startswith('udp:'):
return mavudp(device[4:], input=input, source_system=source_system, use_native=use_native)
if device.lower().endswith('.bin') or device.lower().endswith('.px4log'):
# support dataflash logs
from pymavlink import DFReader
m = DFReader.DFReader_binary(device, zero_time_base=zero_time_base)
mavfile_global = m
return m
if device.endswith('.log'):
# support dataflash text logs
from pymavlink import DFReader
if DFReader.DFReader_is_text_log(device):
m = DFReader.DFReader_text(device, zero_time_base=zero_time_base)
mavfile_global = m
return m
# list of suffixes to prevent setting DOS paths as UDP sockets
logsuffixes = ['mavlink', 'log', 'raw', 'tlog' ]
suffix = device.split('.')[-1].lower()
if device.find(':') != -1 and not suffix in logsuffixes:
return mavudp(device, source_system=source_system, input=input, use_native=use_native)
if os.path.isfile(device):
if device.endswith(".elf") or device.find("/bin/") != -1:
print("executing '%s'" % device)
return mavchildexec(device, source_system=source_system, use_native=use_native)
else:
return mavlogfile(device, planner_format=planner_format, write=write,
append=append, robust_parsing=robust_parsing, notimestamps=notimestamps,
source_system=source_system, use_native=use_native)
return mavserial(device, baud=baud, source_system=source_system, autoreconnect=autoreconnect, use_native=use_native)
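# Illustrative sketch (not part of the original module): the usual open/listen
# pattern. The UDP endpoint below is an assumption (14550 is just a commonly
# used ground-station port); any device string accepted above would work.
def _example_open_and_listen():
    '''sketch: open a listening UDP link and print a few position messages'''
    master = mavlink_connection('udpin:0.0.0.0:14550')
    master.wait_heartbeat()          # learn the target system and component IDs
    for _ in range(5):
        msg = master.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        print("lat=%d lon=%d alt=%dmm" % (msg.lat, msg.lon, msg.alt))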
class periodic_event(object):
'''a class for fixed frequency events'''
def __init__(self, frequency):
self.frequency = float(frequency)
self.last_time = time.time()
def force(self):
'''force immediate triggering'''
self.last_time = 0
def trigger(self):
'''return True if we should trigger now'''
tnow = time.time()
if tnow < self.last_time:
print("Warning, time moved backwards. Restarting timer.")
self.last_time = tnow
if self.last_time + (1.0/self.frequency) <= tnow:
self.last_time = tnow
return True
return False
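# Illustrative sketch (not part of the original module): periodic_event as a
# simple rate limiter. "master" is an assumed, already-opened mavlink_connection().
def _example_send_heartbeat_at_1hz(master):
    '''sketch: emit a GCS heartbeat roughly once per second inside a polling loop'''
    heartbeat_timer = periodic_event(1.0)
    while True:
        if heartbeat_timer.trigger():
            master.mav.heartbeat_send(mavlink.MAV_TYPE_GCS,
                                      mavlink.MAV_AUTOPILOT_INVALID, 0, 0, 0)
        master.recv_msg()            # keep draining the link
        time.sleep(0.01)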
try:
from curses import ascii
have_ascii = True
except:
have_ascii = False
def is_printable(c):
'''see if a character is printable'''
global have_ascii
if have_ascii:
return ascii.isprint(c)
if isinstance(c, int):
ic = c
else:
ic = ord(c)
return ic >= 32 and ic <= 126
def all_printable(buf):
'''see if a string is all printable'''
for c in buf:
if not is_printable(c) and not c in ['\r', '\n', '\t']:
return False
return True
class SerialPort(object):
'''auto-detected serial port'''
def __init__(self, device, description=None, hwid=None):
self.device = device
self.description = description
self.hwid = hwid
def __str__(self):
ret = self.device
if self.description is not None:
ret += " : " + self.description
if self.hwid is not None:
ret += " : " + self.hwid
return ret
def auto_detect_serial_win32(preferred_list=['*']):
'''try to auto-detect serial ports on win32'''
try:
from serial.tools.list_ports_windows import comports
list = sorted(comports())
except:
return []
ret = []
others = []
for port, description, hwid in list:
matches = False
p = SerialPort(port, description=description, hwid=hwid)
for preferred in preferred_list:
if fnmatch.fnmatch(description, preferred) or fnmatch.fnmatch(hwid, preferred):
matches = True
if matches:
ret.append(p)
else:
others.append(p)
if len(ret) > 0:
return ret
# now the rest
ret.extend(others)
return ret
def auto_detect_serial_unix(preferred_list=['*']):
'''try to auto-detect serial ports on unix'''
import glob
glist = glob.glob('/dev/ttyS*') + glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*') + glob.glob('/dev/serial/by-id/*')
ret = []
others = []
# try preferred ones first
for d in glist:
matches = False
for preferred in preferred_list:
if fnmatch.fnmatch(d, preferred):
matches = True
if matches:
ret.append(SerialPort(d))
else:
others.append(SerialPort(d))
if len(ret) > 0:
return ret
ret.extend(others)
return ret
def auto_detect_serial(preferred_list=['*']):
'''try to auto-detect serial port'''
    # dispatch to the platform-specific helper
if os.name == 'nt':
return auto_detect_serial_win32(preferred_list=preferred_list)
return auto_detect_serial_unix(preferred_list=preferred_list)
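# Illustrative sketch (not part of the original module): pick the first
# auto-detected port and open it. The glob patterns and baudrate are assumptions.
#
#   ports = auto_detect_serial(preferred_list=['*FTDI*', '*Arduino*'])
#   if ports:
#       master = mavlink_connection(ports[0].device, baud=57600)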
def mode_string_v09(msg):
'''mode string for 0.9 protocol'''
mode = msg.mode
nav_mode = msg.nav_mode
MAV_MODE_UNINIT = 0
MAV_MODE_MANUAL = 2
MAV_MODE_GUIDED = 3
MAV_MODE_AUTO = 4
MAV_MODE_TEST1 = 5
MAV_MODE_TEST2 = 6
MAV_MODE_TEST3 = 7
MAV_NAV_GROUNDED = 0
MAV_NAV_LIFTOFF = 1
MAV_NAV_HOLD = 2
MAV_NAV_WAYPOINT = 3
MAV_NAV_VECTOR = 4
MAV_NAV_RETURNING = 5
MAV_NAV_LANDING = 6
MAV_NAV_LOST = 7
MAV_NAV_LOITER = 8
cmode = (mode, nav_mode)
mapping = {
(MAV_MODE_UNINIT, MAV_NAV_GROUNDED) : "INITIALISING",
(MAV_MODE_MANUAL, MAV_NAV_VECTOR) : "MANUAL",
(MAV_MODE_TEST3, MAV_NAV_VECTOR) : "CIRCLE",
(MAV_MODE_GUIDED, MAV_NAV_VECTOR) : "GUIDED",
(MAV_MODE_TEST1, MAV_NAV_VECTOR) : "STABILIZE",
(MAV_MODE_TEST2, MAV_NAV_LIFTOFF) : "FBWA",
(MAV_MODE_AUTO, MAV_NAV_WAYPOINT) : "AUTO",
(MAV_MODE_AUTO, MAV_NAV_RETURNING) : "RTL",
(MAV_MODE_AUTO, MAV_NAV_LOITER) : "LOITER",
(MAV_MODE_AUTO, MAV_NAV_LIFTOFF) : "TAKEOFF",
(MAV_MODE_AUTO, MAV_NAV_LANDING) : "LANDING",
(MAV_MODE_AUTO, MAV_NAV_HOLD) : "LOITER",
(MAV_MODE_GUIDED, MAV_NAV_VECTOR) : "GUIDED",
(MAV_MODE_GUIDED, MAV_NAV_WAYPOINT) : "GUIDED",
(100, MAV_NAV_VECTOR) : "STABILIZE",
(101, MAV_NAV_VECTOR) : "ACRO",
(102, MAV_NAV_VECTOR) : "ALT_HOLD",
(107, MAV_NAV_VECTOR) : "CIRCLE",
(109, MAV_NAV_VECTOR) : "LAND",
}
if cmode in mapping:
return mapping[cmode]
return "Mode(%s,%s)" % cmode
mode_mapping_apm = {
0 : 'MANUAL',
1 : 'CIRCLE',
2 : 'STABILIZE',
3 : 'TRAINING',
4 : 'ACRO',
5 : 'FBWA',
6 : 'FBWB',
7 : 'CRUISE',
8 : 'AUTOTUNE',
10 : 'AUTO',
11 : 'RTL',
12 : 'LOITER',
14 : 'LAND',
15 : 'GUIDED',
16 : 'INITIALISING'
}
mode_mapping_acm = {
0 : 'STABILIZE',
1 : 'ACRO',
2 : 'ALT_HOLD',
3 : 'AUTO',
4 : 'GUIDED',
5 : 'LOITER',
6 : 'RTL',
7 : 'CIRCLE',
8 : 'POSITION',
9 : 'LAND',
10 : 'OF_LOITER',
11 : 'DRIFT',
13 : 'SPORT',
14 : 'FLIP',
15 : 'AUTOTUNE',
16 : 'POSHOLD'
}
mode_mapping_rover = {
0 : 'MANUAL',
2 : 'LEARNING',
3 : 'STEERING',
4 : 'HOLD',
10 : 'AUTO',
11 : 'RTL',
15 : 'GUIDED',
16 : 'INITIALISING'
}
mode_mapping_tracker = {
0 : 'MANUAL',
1 : 'STOP',
2 : 'SCAN',
10 : 'AUTO',
16 : 'INITIALISING'
}
mode_mapping_px4 = {
0 : 'MANUAL',
1 : 'ATTITUDE',
2 : 'EASY',
3 : 'AUTO'
}
def mode_mapping_byname(mav_type):
'''return dictionary mapping mode names to numbers, or None if unknown'''
map = None
if mav_type in [mavlink.MAV_TYPE_QUADROTOR,
mavlink.MAV_TYPE_HELICOPTER,
mavlink.MAV_TYPE_HEXAROTOR,
mavlink.MAV_TYPE_OCTOROTOR,
mavlink.MAV_TYPE_TRICOPTER]:
map = mode_mapping_acm
if mav_type == mavlink.MAV_TYPE_FIXED_WING:
map = mode_mapping_apm
if mav_type == mavlink.MAV_TYPE_GROUND_ROVER:
map = mode_mapping_rover
if mav_type == mavlink.MAV_TYPE_ANTENNA_TRACKER:
map = mode_mapping_tracker
if map is None:
return None
inv_map = dict((a, b) for (b, a) in map.items())
return inv_map
def mode_mapping_bynumber(mav_type):
'''return dictionary mapping mode numbers to name, or None if unknown'''
map = None
if mav_type in [mavlink.MAV_TYPE_QUADROTOR,
mavlink.MAV_TYPE_HELICOPTER,
mavlink.MAV_TYPE_HEXAROTOR,
mavlink.MAV_TYPE_OCTOROTOR,
mavlink.MAV_TYPE_TRICOPTER]:
map = mode_mapping_acm
if mav_type == mavlink.MAV_TYPE_FIXED_WING:
map = mode_mapping_apm
if mav_type == mavlink.MAV_TYPE_GROUND_ROVER:
map = mode_mapping_rover
if mav_type == mavlink.MAV_TYPE_ANTENNA_TRACKER:
map = mode_mapping_tracker
if map is None:
return None
return map
def mode_string_v10(msg):
'''mode string for 1.0 protocol, from heartbeat'''
if not msg.base_mode & mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED:
return "Mode(0x%08x)" % msg.base_mode
if msg.type in [ mavlink.MAV_TYPE_QUADROTOR, mavlink.MAV_TYPE_HEXAROTOR,
mavlink.MAV_TYPE_OCTOROTOR, mavlink.MAV_TYPE_TRICOPTER,
mavlink.MAV_TYPE_COAXIAL,
mavlink.MAV_TYPE_HELICOPTER ]:
if msg.custom_mode in mode_mapping_acm:
return mode_mapping_acm[msg.custom_mode]
if msg.type == mavlink.MAV_TYPE_FIXED_WING:
if msg.custom_mode in mode_mapping_apm:
return mode_mapping_apm[msg.custom_mode]
if msg.type == mavlink.MAV_TYPE_GROUND_ROVER:
if msg.custom_mode in mode_mapping_rover:
return mode_mapping_rover[msg.custom_mode]
if msg.type == mavlink.MAV_TYPE_ANTENNA_TRACKER:
if msg.custom_mode in mode_mapping_tracker:
return mode_mapping_tracker[msg.custom_mode]
return "Mode(%u)" % msg.custom_mode
def mode_string_apm(mode_number):
'''return mode string for APM:Plane'''
if mode_number in mode_mapping_apm:
return mode_mapping_apm[mode_number]
return "Mode(%u)" % mode_number
def mode_string_acm(mode_number):
'''return mode string for APM:Copter'''
if mode_number in mode_mapping_acm:
return mode_mapping_acm[mode_number]
return "Mode(%u)" % mode_number
def mode_string_px4(mode_number):
'''return mode string for PX4 flight stack'''
if mode_number in mode_mapping_px4:
return mode_mapping_px4[mode_number]
return "Mode(%u)" % mode_number
class x25crc(object):
'''x25 CRC - based on checksum.h from mavlink library'''
def __init__(self, buf=''):
self.crc = 0xffff
self.accumulate(buf)
def accumulate(self, buf):
'''add in some more bytes'''
bytes = array.array('B')
if isinstance(buf, array.array):
bytes.extend(buf)
else:
bytes.fromstring(buf)
accum = self.crc
for b in bytes:
tmp = b ^ (accum & 0xff)
tmp = (tmp ^ (tmp<<4)) & 0xFF
accum = (accum>>8) ^ (tmp<<8) ^ (tmp<<3) ^ (tmp>>4)
accum = accum & 0xFFFF
self.crc = accum
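# Illustrative sketch (not part of the original module): the checksum can be
# built up incrementally, so the two objects below end with the same crc value.
#
#   c1 = x25crc('hello world')
#   c2 = x25crc('hello')
#   c2.accumulate(' world')
#   assert c1.crc == c2.crc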
class MavlinkSerialPort():
'''an object that looks like a serial port, but
transmits using mavlink SERIAL_CONTROL packets'''
def __init__(self, portname, baudrate, devnum=0, devbaud=0, timeout=3, debug=0):
from . import mavutil
self.baudrate = 0
self.timeout = timeout
self._debug = debug
self.buf = ''
self.port = devnum
self.debug("Connecting with MAVLink to %s ..." % portname)
self.mav = mavutil.mavlink_connection(portname, autoreconnect=True, baud=baudrate)
self.mav.wait_heartbeat()
self.debug("HEARTBEAT OK\n")
if devbaud != 0:
self.setBaudrate(devbaud)
self.debug("Locked serial device\n")
def debug(self, s, level=1):
'''write some debug text'''
if self._debug >= level:
print(s)
def write(self, b):
'''write some bytes'''
from . import mavutil
self.debug("sending '%s' (0x%02x) of len %u\n" % (b, ord(b[0]), len(b)), 2)
while len(b) > 0:
n = len(b)
if n > 70:
n = 70
buf = [ord(x) for x in b[:n]]
buf.extend([0]*(70-len(buf)))
self.mav.mav.serial_control_send(self.port,
mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,
0,
0,
n,
buf)
b = b[n:]
def _recv(self):
'''read some bytes into self.buf'''
from . import mavutil
start_time = time.time()
while time.time() < start_time + self.timeout:
m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0',
type='SERIAL_CONTROL', blocking=False, timeout=0)
if m is not None and m.count != 0:
break
self.mav.mav.serial_control_send(self.port,
mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,
0,
0,
0, [0]*70)
m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0',
type='SERIAL_CONTROL', blocking=True, timeout=0.01)
if m is not None and m.count != 0:
break
if m is not None:
if self._debug > 2:
print(m)
data = m.data[:m.count]
self.buf += ''.join(str(chr(x)) for x in data)
def read(self, n):
'''read some bytes'''
if len(self.buf) == 0:
self._recv()
if len(self.buf) > 0:
if n > len(self.buf):
n = len(self.buf)
ret = self.buf[:n]
self.buf = self.buf[n:]
if self._debug >= 2:
for b in ret:
self.debug("read 0x%x" % ord(b), 2)
return ret
return ''
def flushInput(self):
'''flush any pending input'''
self.buf = ''
saved_timeout = self.timeout
self.timeout = 0.5
self._recv()
self.timeout = saved_timeout
self.buf = ''
self.debug("flushInput")
def setBaudrate(self, baudrate):
'''set baudrate'''
from . import mavutil
if self.baudrate == baudrate:
return
self.baudrate = baudrate
self.mav.mav.serial_control_send(self.port,
mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE,
0,
self.baudrate,
0, [0]*70)
self.flushInput()
self.debug("Changed baudrate %u" % self.baudrate)
if __name__ == '__main__':
serial_list = auto_detect_serial(preferred_list=['*FTDI*',"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*'])
for port in serial_list:
print("%s" % port)
| magicrub/mavlink | pymavlink/mavutil.py | Python | lgpl-3.0 | 57,225 |
from google.appengine.api import users
from google.appengine.api import memcache
from google.appengine.ext import db
import logging
class UserPrefs(db.Model):
tz_offset = db.IntegerProperty(default=0)
user = db.UserProperty(auto_current_user_add=True)
def cache_set(self):
tid = self.key().name()
memcache.set('UserPrefs:' + tid, self)
    # Put needs to be overridden to keep the memcache copy in sync
def put(self):
super(UserPrefs, self).put()
logging.info('cache set')
self.cache_set()
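# Illustrative sketch (not part of the original module): typical request-handler
# usage, assuming a signed-in user so get_userprefs() returns an entity.
#
#   prefs = get_userprefs()
#   if prefs and prefs.tz_offset != -5:
#       prefs.tz_offset = -5
#       prefs.put()   # datastore write plus memcache refresh via cache_set()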
# Fetch the current user's UserPrefs, checking memcache before falling back to the datastore.
def get_userprefs(user_id=None):
if not user_id:
user = users.get_current_user()
if not user:
return None
user_id = user.user_id()
userprefs = memcache.get('UserPrefs:' + user_id)
if not userprefs:
# Perform DB Query if cached version is not found
key = db.Key.from_path('UserPrefs', user_id)
userprefs = db.get(key)
if not userprefs:
userprefs = UserPrefs(key_name=user_id)
    return userprefs
| jscontreras/learning-gae | models.py | Python | lgpl-3.0 | 1,072
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-09 17:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patrons', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='patron',
name='canonisation',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='patron',
name='reminiscence',
field=models.DateField(blank=True, null=True),
),
]
| PrayAndGrow/server | patrons/migrations/0002_auto_20160409_1714.py | Python | lgpl-3.0 | 620 |
# (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
import unittest
import cartopy
import cartopy.io.shapereader as shp
COASTLINE_PATH = shp.natural_earth()
class TestCoastline(unittest.TestCase):
def test_robust(self):
# Make sure all the coastlines can be projected without raising any
# exceptions.
projection = cartopy.crs.TransverseMercator(central_longitude=-90)
reader = shp.Reader(COASTLINE_PATH)
all_geometries = list(reader.geometries())
geometries = []
geometries += all_geometries
#geometries += all_geometries[48:52] # Aus & Taz
#geometries += all_geometries[72:73] # GB
#for geometry in geometries:
for i, geometry in enumerate(geometries[93:]):
for line_string in geometry:
multi_line_string = projection.project_geometry(line_string)
if __name__ == '__main__':
unittest.main()
| lbdreyer/cartopy | lib/cartopy/tests/test_coastline.py | Python | lgpl-3.0 | 1,600 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, csv, time, codecs, shutil, urllib2, logging
from optparse import make_option
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from django.utils.text import slugify
from zipfile import ZipFile
from zup.utils import gooseapi, unicode_dict_reader, unique_mkdir
from zup.models import Job, Url
logger = logging.getLogger('zup')
class Command(BaseCommand):
'''
    usage:
python manage.py start_job --job=1 --cmd=test
'''
args = ''
help = 'execute some job on corpus.'
option_list = BaseCommand.option_list + (
make_option('--job',
action='store',
dest='job_pk',
default=False,
help='job primary key'),
make_option('--cmd',
action='store',
dest='cmd',
default=False,
help='manage.py command to be executed'),
)
def _test(self, job):
job.status = Job.RUNNING
job.save()
time.sleep(15)
job.status = Job.COMPLETED
job.save()
def _scrape(self, job, fields=['title', 'tags', 'meta_keywords']):
logger.debug('starting command "scrape"')
job.status = Job.RUNNING
job.save()
job_path = job.get_path()
path = unique_mkdir(os.path.join(job_path, 'files'))
urls = job.urls.all()
# create zip filename and remove previous one
zip_path = os.path.join(job_path, 'urls_to_zip.zip')
if os.path.exists(zip_path):
os.remove(zip_path)
# create csv report
rep_path = os.path.join(path, 'report.csv')
reports = []
logger.debug('zip path: %s' % zip_path)
# filename length
max_length = 64
with ZipFile(zip_path, 'w') as zip_file:
for i,url in enumerate(urls): # sync or not async
index = '%0*d' % (5, int(i) + 1)
url.status= Url.READY
url.save()
try:
g = gooseapi(url=url.url)
except urllib2.HTTPError, e:
url.status= Url.ERROR
url.log = '%s' % e
url.save()
continue
except urllib2.URLError, e:
url.status= Url.ERROR
url.log = '%s' % e
url.save()
continue
except ValueError, e: # that is, url is not a valid url
url.status= Url.ERROR
url.log = '%s' % e
url.save()
continue
except IOError, e: # probably the stopword file was not found, skip this url
url.status= Url.ERROR
url.log = '%s' % e
url.save()
continue
except Exception, e:
logger.exception(e)
continue
logger.debug('title: %s', g.title)
logger.debug('url: %s', url.url)
# handling not found title stuff
slug = '%s-%s' % (index,slugify(g.title if g.title else url.url)[:max_length])
slug_base = slug
textified = os.path.join(path, slug)
c = 1
while os.path.exists(textified):
candidate = '%s-%s-%s' % (index, slug_base, c)
if len(candidate) > max_length:
slug = slug[:max_length-len('-%s' % c)]
slug = re.sub('\-+','-',candidate)
textified = os.path.join(path, slug)
c += 1
textified = "%s.txt" % textified
with codecs.open(textified, encoding='utf-8', mode='w') as f:
f.write('\n\n%s\n\n\n\n' % g.title)
f.write(g.cleaned_text)
# completed url scraping
url.status= Url.COMPLETED
url.save()
zip_file.write(textified, os.path.basename(textified))
# WRITE SOME REPORT
result = {
'id': index,
'path': os.path.basename(textified),
'url': url.url
}
for i in fields:
if i == 'tags':
result[i] = ', '.join(getattr(g, i))
else:
result[i] = getattr(g, i)
result[i]=result[i].encode('utf8')
reports.append(result)
# JOB FINISHED, WRITING REPORT
with open(rep_path, 'w') as report:
writer = csv.DictWriter(report, ['id', 'path', 'url'] + fields)
writer.writeheader()
for report in reports:
writer.writerow(report)
zip_file.write(rep_path, os.path.basename(rep_path))
shutil.rmtree(path)
# close job
job.status = Job.COMPLETED
job.save()
def handle(self, *args, **options):
if not options['cmd']:
raise CommandError("\n ouch. You should specify a valid function as cmd param")
if not options['job_pk']:
raise CommandError("\n ouch. please provide a job id to record logs and other stuff")
# maximum 5 jobs at the same time
try:
job = Job.objects.get(pk=options['job_pk'])
except Job.DoesNotExist, e:
raise CommandError("\n ouch. Try again, job pk=%s does not exist!" % options['job_pk'])
cmd = '_%s' % options['cmd']
getattr(self, cmd)(job=job) # no job will be charged!
| medialab/zup | zup/management/commands/start_job.py | Python | lgpl-3.0 | 5,000 |
# -*-coding:Utf-8 -*
import Vue.Figure as figure
import Vue.Donnee as donnee
import Lecture.FonctionLectureClassique as classique
import numpy as np
import sys
########################################################################################
#------------------------------- Input -----------------------------
########################################################################################
if (len(sys.argv)==2):
filename=sys.argv[1]
print(filename+" will be printed")
else:
print("You must give the name of the output file")
sys.exit(1)
outputname_err="/".join(filename.split("/")[0:-1])+"/graphe_"+(filename.split("/")[-1]).split(".")[0]
########################################################################################
#------------------------------- Figure -----------------------------
########################################################################################
colors=["m","b","c","r","g","y","k","firebrick","purple"]
markers=["^","o",".","v"]
(dist,rank) = classique.lecture(filename,0,1)
(err1,err2) = classique.lecture(filename,2,3)
Dist = []
Rank = []
Err1 = []
Err2 = []
Courbes_dist = []
Courbes_rank = []
Courbes_err1 = []
Courbes_err2 = []
compt=0
ymax_err=0
ymin_err=1e30
offset = 49
for i in range(1,6):
Rank.append(rank[compt+0:compt+offset])
Dist.append(dist[compt+0])
Err1.append(err1[compt+0:compt+offset])
Err2.append(err2[compt+0:compt+offset])
ymax_err=max(ymax_err,max(err1[compt+0:compt+offset]))
ymax_err=max(ymax_err,max(err2[compt+0:compt+offset]))
ymin_err=min(ymin_err,min(err1[compt+0:compt+offset]))
ymin_err=min(ymin_err,min(err2[compt+0:compt+offset]))
compt+=offset
ncolor=0
for i in range(0,len(Dist)):
line1={"linestyle":"-","linewidth":3,"linecolor":colors[ncolor]}
line2={"linestyle":"--","linewidth":3,"linecolor":colors[ncolor]}
marker={"markerstyle":"None","markersize":10,"fillstyle":"full"}
Courbes_err1.append(donnee.Ligne(nom=r"ACA - distance="+str(Dist[i]),ordonnee=Err1[i],abscisse=Rank[i],line=line1,marker=marker))
Courbes_err2.append(donnee.Ligne(nom=r"SVD - distance="+str(Dist[i]),ordonnee=Err2[i],abscisse=Rank[i],line=line2,marker=marker))
ncolor+=1
xlim=[min(Rank[0])*0.75,max(Rank[0])*1.01]
ylim_erro=[ymin_err*0.75,ymax_err*1.25]
xlabel={"label":"Rank","fontsize":20}
ylabel_erro={"label":"Relative error","fontsize":20}
# titre={"titre":"Test","fontsize":20,"loc":"center"}
legende={"loc":"upper left","bbox_to_anchor":(1.01,1),"ncol":1,"fontsize":12}
Figure_erro=figure.Graphe1D(id=0,legende=legende,xlim=xlim,ylim=ylim_erro,xlabel=xlabel,ylabel=ylabel_erro,yscale="log",axis="off",format="pdf")
for courbe in Courbes_err1:
Figure_erro.AjoutCourbe(courbe)
for courbe in Courbes_err2:
Figure_erro.AjoutCourbe(courbe)
Figure_erro.TraceGraphe1D()
Figure_erro.EnregistreFigure(outputname_err)
Figure_erro.FermeFigure()
| xclaeys/ElastoPhi | postprocessing/graphes_output_err_decrease.py | Python | lgpl-3.0 | 2,917 |
#Importing helper class for RBPRM
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
from hpp.gepetto import Viewer
#reference pose for hyq
from hyq_ref_pose import hyq_ref
from hpp.corbaserver.rbprm.state_alg import *
from numpy import array
#calling script mount_hyq_path to compute root path
import mount_hyq_path as tp
from os import environ
ins_dir = environ['DEVEL_DIR']
db_dir = ins_dir+"/install/share/hyq-rbprm/database/hyq_"
from hpp.corbaserver import Client
packageName = "hyq_description"
meshPackageName = "hyq_description"
rootJointType = "freeflyer"
# Information to retrieve urdf and srdf files.
urdfName = "hyq"
urdfSuffix = ""
srdfSuffix = ""
# This time we load the full body model of HyQ
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.setJointBounds ("base_joint_xyz", [-4,6, -1, 1, 0.3, 2.5])
# Setting a number of sample configurations used
nbSamples = 20000
ps = tp.ProblemSolver(fullBody)
r = tp.Viewer (ps, viewerClient=tp.r.client)
rootName = 'base_joint_xyz'
cType = "_3_DOF"
rLegId = 'rfleg'
rLeg = 'rf_haa_joint'
rfoot = 'rf_foot_joint'
offset = [0.,-0.021,0.]
normal = [0,1,0]
legx = 0.02; legy = 0.02
def addLimbDb(limbId, heuristicName, loadValues = True, disableEffectorCollision = False):
fullBody.addLimbDatabase(str(db_dir+limbId+'.db'), limbId, heuristicName,loadValues, disableEffectorCollision)
fullBody.addLimb(rLegId,rLeg,rfoot,offset,normal, legx, legy, nbSamples, "jointlimits", 0.1, cType)
lLegId = 'lhleg'
lLeg = 'lh_haa_joint'
lfoot = 'lh_foot_joint'
fullBody.addLimb(lLegId,lLeg,lfoot,offset,normal, legx, legy, nbSamples, "jointlimits", 0.05, cType)
#~
rarmId = 'rhleg'
rarm = 'rh_haa_joint'
rHand = 'rh_foot_joint'
fullBody.addLimb(rarmId,rarm,rHand,offset,normal, legx, legy, nbSamples, "jointlimits", 0.05, cType)
larmId = 'lfleg'
larm = 'lf_haa_joint'
lHand = 'lf_foot_joint'
fullBody.addLimb(larmId,larm,lHand,offset,normal, legx, legy, nbSamples, "jointlimits", 0.05, cType)
fullBody.runLimbSampleAnalysis(rLegId, "jointLimitsDistance", True)
fullBody.runLimbSampleAnalysis(lLegId, "jointLimitsDistance", True)
fullBody.runLimbSampleAnalysis(rarmId, "jointLimitsDistance", True)
fullBody.runLimbSampleAnalysis(larmId, "jointLimitsDistance", True)
#~ q_init = hyq_ref[:]; q_init[0:7] = tp.q_init[0:7];
#~ q_goal = hyq_ref[:]; q_goal[0:7] = tp.q_goal[0:7];
q_init = hyq_ref[:]; q_init[0:7] = tp.q_init[0:7]; q_init[2]=hyq_ref[2]+0.02
q_goal = hyq_ref[:]; q_goal[0:7] = tp.q_goal[0:7]; q_init[2]=hyq_ref[2]+0.02
# Randomly generating a contact configuration at q_init
#~ fullBody.setCurrentConfig (q_init)
#~ q_init = fullBody.generateContacts(q_init, [0,0,1])
# Randomly generating a contact configuration at q_end
#~ fullBody.setCurrentConfig (q_goal)
#~ q_goal = fullBody.generateContacts(q_goal, [0,0,1])
# specifying the full body configurations as start and goal state of the problem
fullBody.setStartState(q_init,[rLegId,lLegId,rarmId,larmId])
fullBody.setEndState(q_goal,[rLegId,lLegId,rarmId,larmId])
#~ fullBody.setStartState(q_init,[rLegId,lLegId,rarmId])
#~ fullBody.setEndState(q_goal,[rLegId,lLegId,rarmId])
r(q_init)
configs = []
from hpp.gepetto import PathPlayer
pp = PathPlayer (fullBody.client.basic, r)
from hpp.corbaserver.rbprm.tools.cwc_trajectory_helper import step, clean,stats, saveAllData, play_traj
#~ limbsCOMConstraints = { rLegId : {'file': "hyq/"+rLegId+"_com.ineq", 'effector' : rfoot},
#~ lLegId : {'file': "hyq/"+lLegId+"_com.ineq", 'effector' : lfoot},
#~ rarmId : {'file': "hyq/"+rarmId+"_com.ineq", 'effector' : rHand},
#~ larmId : {'file': "hyq/"+larmId+"_com.ineq", 'effector' : lHand} }
limbsCOMConstraints = { rLegId : {'file': "hrp2/RL_com.ineq", 'effector' : rfoot},
lLegId : {'file': "hrp2/LL_com.ineq", 'effector' : lfoot},
rarmId : {'file': "hrp2/RA_com.ineq", 'effector' : rHand},
larmId : {'file': "hrp2/LA_com.ineq", 'effector' : lHand} }
def initConfig():
r.client.gui.setVisibility("hyq", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hyq_trunk_large", "OFF")
r(q_init)
def endConfig():
r.client.gui.setVisibility("hyq", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hyq_trunk_large", "OFF")
r(q_goal)
def rootPath():
r.client.gui.setVisibility("hyq", "OFF")
tp.cl.problem.selectProblem("rbprm_path")
tp.r.client.gui.setVisibility("toto", "OFF")
r.client.gui.setVisibility("hyq", "OFF")
tp.r.client.gui.setVisibility("hyq_trunk_large", "ON")
tp.pp(0)
tp.r.client.gui.setVisibility("hyq_trunk_large", "OFF")
r.client.gui.setVisibility("hyq", "ON")
tp.cl.problem.selectProblem("default")
def genPlan(stepsize=0.06):
tp.cl.problem.selectProblem("default")
r.client.gui.setVisibility("hyq", "ON")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hyq_trunk_large", "OFF")
global configs
start = time.clock()
configs = fullBody.interpolate(stepsize, 5, 5, True)
end = time.clock()
print "Contact plan generated in " + str(end-start) + "seconds"
def contactPlan(step = 0.5):
r.client.gui.setVisibility("hyq", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hyq_trunk_large", "OFF")
global configs
for i in range(0,len(configs)):
r(configs[i]);
time.sleep(step)
def a():
print "initial configuration"
initConfig()
def b():
print "end configuration"
endConfig()
def c():
print "displaying root path"
rootPath()
def d(step=0.06):
print "computing contact plan"
genPlan(step)
def e(step = 0.5):
print "displaying contact plan"
contactPlan(step)
from bezier_traj import go0, go2, init_bezier_traj, reset
from hpp.corbaserver.rbprm.tools.cwc_trajectory_helper import play_trajectory
import time
from hpp.corbaserver.rbprm.rbprmstate import State
from hpp.corbaserver.rbprm.state_alg import addNewContact, isContactReachable, closestTransform, removeContact, addNewContactIfReachable, projectToFeasibleCom
path = []
def sc(ec):
pass
def pl(iid = None):
global path
if iid == None:
iid = len(path) -1
play_trajectory(fullBody,pp,path[iid])
def plc(ctx = 0, iid = None):
sc(ctx)
pl(iid)
def go():
return go0(states, mu=0.6,num_optim=2, use_kin = context == 0)
def plall(first = 0):
global path
sc(first)
for pId in range(len(path)):
play_trajectory(fullBody,pp,path[pId])
from pickle import load, dump
def save(fname):
sc(0)
all_data=[[],[]]
global states
for s in states:
all_data[0]+=[[s.q(), s.getLimbsInContact()]]
f = open(fname, "w")
dump(all_data,f)
f.close()
def load_save(fname):
f = open(fname, "r+")
all_data = load (f)
f.close()
sc(0)
global states
states = []
#~ for i in range(0,len(all_data[0]),2):
#~ print "q",all_data[0][i]
#~ print "lic",all_data[0][i+1]
#~ states+=[State(fullBody,q=all_data[0][i], limbsIncontact = all_data[0][i+1]) ]
for _, s in enumerate(all_data[0]):
states+=[State(fullBody,q=s[0], limbsIncontact = s[1]) ]
r(states[0].q())
def onepath(ol, ctxt=1, nopt=1, mu=1, effector = False):
reset()
sc(ctxt)
global path
global states
print "ctxt", ctxt
print "q", len(states[ol+1].q())
s = max(norm(array(states[ol+1].q()) - array(states[ol].q())), 1.) * 0.4
print "s",s
if(ol > len(path) -1):
path += [go0([states[ol],states[ol+1]], num_optim=nopt, mu=mu, use_kin = False, s=s, effector = effector)]
else:
path[ol]=go0([states[ol],states[ol+1]], num_optim=nopt, mu=mu, use_kin = False, s=s, effector = effector)
all_paths[ctxt] = path
def onepath2(states_subset, ctxt=1, nopt=1, mu=1, effector = False):
reset()
sc(ctxt)
global path
global states
#~ print "ctxt", ctxt
#~ print "q", len(states[ol+1].q())
#~ s = max(norm(array(states_subset[1].q()) - array(states_subset[0].q())), 1.) * 0.4
#~ print "s",s
#~ if(ol > len(path) -1):
path = all_paths[ctxt][:]
path += [go2(states_subset, num_optim=nopt, mu=mu, use_kin = False, s=None, effector = effector)]
#~ else:
#~ path[ol]=go2(states_subset, num_optim=nopt, mu=mu, use_kin = False, s=s, effector = effector)
all_paths[ctxt] = path
def save_paths(fname):
f = open(fname, "w")
dump(all_paths,f)
f.close()
#now try with latest paths
    global all_paths
global path
sc(0)
all_paths[0] = path[:]
f = open(fname+"all", "w")
dump(all_paths,f)
f.close()
def load_paths(fname):
f = open(fname, "r")
global all_paths
all_paths = load (f)
f.close()
sc(0)
global path
path = all_paths[0][:]
def sh(ctxt, i):
sc(ctxt)
r(states[i].q())
def lc():
load_save("19_06_s")
load_paths("19_06_p")
#~ save_paths("19_06_p_save")
save("19_06_s_save")
def sac():
save("19_06_s")
save_paths("19_06_p")
init_bezier_traj(fullBody, r, pp, configs, limbsCOMConstraints)
all_paths = [[],[]]
from hpp.corbaserver.rbprm.state_alg import *
#~ d(0.07);e(0.01)
i=0
#~ d(0.09); e(0.01); states = planToStates(fullBody,configs)
#~ lc()
#~ le = min(38, len(states)-10)
#~ onepath2(states [0:-1],nopt=3,mu=0.99,effector=True)
#~ e(0.01)
| pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/siggraph_asia/chair/mount_hyq.py | Python | lgpl-3.0 | 9,667 |
# -*- coding: utf-8 -*-
#
# Copyright 2009: Johannes Raggam, BlueDynamics Alliance
# http://bluedynamics.com
# GNU Lesser General Public License Version 2 or later
__author__ = """Johannes Raggam <[email protected]>"""
__docformat__ = 'plaintext'
from setuptools import setup, find_packages
import sys, os
version = '1.0'
shortdesc = "Test models and executions for activities"
longdesc = open(os.path.join(os.path.dirname(__file__), 'README.txt')).read()
longdesc += open(os.path.join(os.path.dirname(__file__), 'LICENSE.txt')).read()
setup(name='activities.test.hospital',
version=version,
description=shortdesc,
long_description=longdesc,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Zope3',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules'
], # Get strings from http://pypi.python.org/pypi?:action=list_classifiers
keywords='UML Activities runtime',
author='Johannes Raggam',
author_email='[email protected]',
url='',
license='LGPL',
packages = find_packages('src'),
package_dir = {'': 'src'},
namespace_packages=['activities','activities.test'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
          # -*- Extra requirements: -*-
'activities.metamodel',
'activities.runtime',
],
extras_require={
'test': [
'interlude',
]
},
entry_points="""
# -*- Entry points: -*-
""",
)
| bluedynamics/activities.test.hospital | setup.py | Python | lgpl-3.0 | 1,827 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lazagne.config.constant import constant
from lazagne.config.module_info import ModuleInfo
from lazagne.config import homes
from binascii import hexlify
import traceback
try:
import jeepney.auth
# except ImportError:
except Exception:
pass
else:
    # Thanks to @mitya57 for the workaround
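    # jeepney's own make_auth_external() normally derives the AUTH EXTERNAL
    # line from the current process uid; this override substitutes whatever
    # uid has been assigned to make_auth_external.uid (set per user session
    # further below), so each session bus sees its owner's uid.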
def make_auth_external():
hex_uid = hexlify(str(make_auth_external.uid).encode('ascii'))
return b'AUTH EXTERNAL %b\r\n' % hex_uid
jeepney.auth.make_auth_external = make_auth_external
class Libsecret(ModuleInfo):
def __init__(self):
ModuleInfo.__init__(self, 'libsecret', 'wallet')
def run(self):
items = []
visited = set()
try:
import dbus
import secretstorage
import datetime
except ImportError as e:
self.error('libsecret: {0}'.format(e))
return []
for uid, session in homes.sessions():
try:
# List bus connection names
bus = dbus.bus.BusConnection(session)
if 'org.freedesktop.secrets' not in [str(x) for x in bus.list_names()]:
continue
except Exception:
self.error(traceback.format_exc())
continue
collections = None
try:
# Python 2.7
collections = list(secretstorage.collection.get_all_collections(bus))
except Exception:
pass
if not collections:
try:
# Python 3
from jeepney.io.blocking import open_dbus_connection
make_auth_external.uid = uid
bus = open_dbus_connection(session)
collections = secretstorage.get_all_collections(bus)
except Exception:
self.error(traceback.format_exc())
continue
for collection in collections:
if collection.is_locked():
continue
label = collection.get_label()
if label in visited:
continue
visited.add(label)
try:
storage = collection.get_all_items()
except Exception:
self.error(traceback.format_exc())
continue
for item in storage:
values = {
'created': str(datetime.datetime.fromtimestamp(item.get_created())),
'modified': str(datetime.datetime.fromtimestamp(item.get_modified())),
'content-type': item.get_secret_content_type(),
'label': item.get_label(),
'Password': item.get_secret().decode('utf8'),
'collection': label,
}
# for k, v in item.get_attributes().iteritems():
# values[unicode(k)] = unicode(v)
items.append(values)
if item.get_label().endswith('Safe Storage'):
constant.chrome_storage.append(item.get_secret())
try:
bus.flush()
bus.close()
except Exception:
pass
return items
| AlessandroZ/LaZagne | Linux/lazagne/softwares/wallet/libsecret.py | Python | lgpl-3.0 | 3,425 |
# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Manage the monitor list."""
# Import system lib
import re
import subprocess
# Import Glances lib
from glances.core.glances_logging import logger
from glances.core.glances_processes import glances_processes
class MonitorList(object):
"""This class describes the optional monitored processes list.
The monitored list is a list of 'important' processes to monitor.
The list (Python list) is composed of items (Python dict).
An item is defined (dict keys):
* description: Description of the processes (max 16 chars)
* regex: regular expression of the processes to monitor
* command: (optional) shell command for extended stat
* countmin: (optional) minimal number of processes
* countmax: (optional) maximum number of processes
"""
# Maximum number of items in the list
__monitor_list_max_size = 10
# The list
__monitor_list = []
def __init__(self, config):
"""Init the monitoring list from the configuration file."""
self.config = config
if self.config is not None and self.config.has_section('monitor'):
# Process monitoring list
self.__set_monitor_list('monitor', 'list')
else:
self.__monitor_list = []
def __set_monitor_list(self, section, key):
"""Init the monitored processes list.
The list is defined in the Glances configuration file.
"""
for l in range(1, self.__monitor_list_max_size + 1):
value = {}
key = "list_" + str(l) + "_"
try:
description = self.config.get_raw_option(section, key + "description")
regex = self.config.get_raw_option(section, key + "regex")
command = self.config.get_raw_option(section, key + "command")
countmin = self.config.get_raw_option(section, key + "countmin")
countmax = self.config.get_raw_option(section, key + "countmax")
except Exception as e:
logger.error("Cannot read monitored list: {0}".format(e))
pass
else:
if description is not None and regex is not None:
# Build the new item
value["description"] = description
try:
re.compile(regex)
except Exception:
continue
else:
value["regex"] = regex
value["command"] = command
value["countmin"] = countmin
value["countmax"] = countmax
value["count"] = None
value["result"] = None
# Add the item to the list
self.__monitor_list.append(value)
def __str__(self):
return str(self.__monitor_list)
def __repr__(self):
return self.__monitor_list
def __getitem__(self, item):
return self.__monitor_list[item]
def __len__(self):
return len(self.__monitor_list)
def __get__(self, item, key):
"""Meta function to return key value of item.
Return None if not defined or item > len(list)
"""
if item < len(self.__monitor_list):
try:
return self.__monitor_list[item][key]
except Exception:
return None
else:
return None
def update(self):
"""Update the command result attributed."""
# Only continue if monitor list is not empty
if len(self.__monitor_list) == 0:
return self.__monitor_list
# Iter upon the monitored list
for i in range(0, len(self.get())):
# Search monitored processes by a regular expression
processlist = glances_processes.getlist()
monitoredlist = [p for p in processlist if re.search(self.regex(i), p['cmdline']) is not None]
self.__monitor_list[i]['count'] = len(monitoredlist)
if self.command(i) is None:
# If there is no command specified in the conf file
# then display CPU and MEM %
self.__monitor_list[i]['result'] = 'CPU: {0:.1f}% | MEM: {1:.1f}%'.format(
sum([p['cpu_percent'] for p in monitoredlist]),
sum([p['memory_percent'] for p in monitoredlist]))
continue
else:
# Execute the user command line
try:
self.__monitor_list[i]['result'] = subprocess.check_output(self.command(i),
shell=True)
except subprocess.CalledProcessError:
self.__monitor_list[i]['result'] = _("Error: ") + self.command(i)
except Exception:
self.__monitor_list[i]['result'] = _("Cannot execute command")
return self.__monitor_list
def get(self):
"""Return the monitored list (list of dict)."""
return self.__monitor_list
def set(self, newlist):
"""Set the monitored list (list of dict)."""
self.__monitor_list = newlist
def getAll(self):
# Deprecated: use get()
return self.get()
def setAll(self, newlist):
# Deprecated: use set()
self.set(newlist)
def description(self, item):
"""Return the description of the item number (item)."""
return self.__get__(item, "description")
def regex(self, item):
"""Return the regular expression of the item number (item)."""
return self.__get__(item, "regex")
def command(self, item):
"""Return the stat command of the item number (item)."""
return self.__get__(item, "command")
def result(self, item):
"""Return the reult command of the item number (item)."""
return self.__get__(item, "result")
def countmin(self, item):
"""Return the minimum number of processes of the item number (item)."""
return self.__get__(item, "countmin")
def countmax(self, item):
"""Return the maximum number of processes of the item number (item)."""
return self.__get__(item, "countmax")
| nclsHart/glances | glances/core/glances_monitor_list.py | Python | lgpl-3.0 | 7,047 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# coding=utf-8
import sunburnt
from anta.util import config
# solr $ANTA_HOME/solr-conf
config_solr = config.config["solr"]
class SOLRInterface():
si = sunburnt.SolrInterface(config_solr['endpoint'])
def initialize(self):
"""Initialize schema"""
self.si.init_schema()
def delete_all(self):
"""Delete all documents"""
self.si.delete_all()
self.si.commit()
def add_documents(self, documents):
"""Add some documents"""
self.si.add(documents)
self.si.commit()
def add_document(self, document):
"""Add a document"""
self.si.add(document)
def commit(self):
self.si.commit()
def delete_document(self, document):
"""Delete a document"""
pass
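# Minimal usage sketch (document fields are illustrative, not a fixed schema;
# sunburnt accepts plain dicts keyed by the fields of the configured Solr
# schema):
#
#   si = SOLRInterface()
#   si.add_documents([{"id": "doc-1", "content": "extracted text"}])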
| medialab/ANTA2 | anta/storage/solr_client.py | Python | lgpl-3.0 | 826 |
import unittest
from Tribler.community.market.core.message import TraderId, MessageNumber, MessageId
from Tribler.community.market.core.order import OrderId, OrderNumber
from Tribler.community.market.core.price import Price
from Tribler.community.market.core.quantity import Quantity
from Tribler.community.market.core.side import Side
from Tribler.community.market.core.tick import Tick
from Tribler.community.market.core.timeout import Timeout
from Tribler.community.market.core.timestamp import Timestamp
class SideTestSuite(unittest.TestCase):
"""Side test cases."""
def setUp(self):
# Object creation
self.tick = Tick(MessageId(TraderId('0'), MessageNumber('message_number')),
OrderId(TraderId('0'), OrderNumber(1)), Price(400, 'BTC'), Quantity(30, 'MC'),
Timeout(float("inf")), Timestamp(float("inf")), True)
self.tick2 = Tick(MessageId(TraderId('1'), MessageNumber('message_number')),
OrderId(TraderId('1'), OrderNumber(2)), Price(800, 'BTC'), Quantity(30, 'MC'),
Timeout(float("inf")), Timestamp(float("inf")), True)
self.side = Side()
def test_max_price(self):
# Test max price (list)
self.assertEquals(None, self.side.get_max_price('BTC', 'MC'))
self.assertEquals(None, self.side.get_max_price_list('BTC', 'MC'))
self.side.insert_tick(self.tick)
self.side.insert_tick(self.tick2)
self.assertEquals('30.000000 MC\t@\t800.000000 BTC\n', str(self.side.get_max_price_list('BTC', 'MC')))
self.assertEquals(Price(800, 'BTC'), self.side.get_max_price('BTC', 'MC'))
def test_min_price(self):
# Test min price (list)
self.assertEquals(None, self.side.get_min_price_list('BTC', 'MC'))
self.assertEquals(None, self.side.get_min_price('BTC', 'MC'))
self.side.insert_tick(self.tick)
self.side.insert_tick(self.tick2)
self.assertEquals('30.000000 MC\t@\t400.000000 BTC\n', str(self.side.get_min_price_list('BTC', 'MC')))
self.assertEquals(Price(400, 'BTC'), self.side.get_min_price('BTC', 'MC'))
def test_insert_tick(self):
# Test insert tick
self.assertEquals(0, len(self.side))
self.assertFalse(self.side.tick_exists(OrderId(TraderId('0'), OrderNumber(1))))
self.side.insert_tick(self.tick)
self.side.insert_tick(self.tick2)
self.assertEquals(2, len(self.side))
self.assertTrue(self.side.tick_exists(OrderId(TraderId('0'), OrderNumber(1))))
def test_remove_tick(self):
# Test remove tick
self.side.insert_tick(self.tick)
self.side.insert_tick(self.tick2)
self.side.remove_tick(OrderId(TraderId('0'), OrderNumber(1)))
self.assertEquals(1, len(self.side))
self.side.remove_tick(OrderId(TraderId('1'), OrderNumber(2)))
self.assertEquals(0, len(self.side))
def test_get_price_level_list_wallets(self):
"""
Test the price level lists of wallets of a side
"""
self.assertFalse(self.side.get_price_level_list_wallets())
self.side.insert_tick(self.tick)
self.assertTrue(self.side.get_price_level_list_wallets())
def test_get_list_representation(self):
"""
Testing the list representation of a side
"""
self.assertFalse(self.side.get_list_representation())
self.side.insert_tick(self.tick)
list_rep = self.side.get_list_representation()
self.assertTrue(list_rep)
| vandenheuvel/tribler | Tribler/Test/Community/Market/test_side.py | Python | lgpl-3.0 | 3,565 |
from django.db import models
from django.contrib.gis.db import models as gmodels
from django_hstore import hstore
class License(models.Model):
"""A license under which a DataSource is published and useable."""
name = models.CharField(max_length=50)
url = models.URLField(help_text="A link to this license.")
version = models.CharField(
max_length=10, blank=True, null=True,
help_text="If this is some version of the license, identify it.")
body = models.TextField(
blank=True, null=True,
help_text="If there is no URL available, you can paste the license.")
def __unicode__(self):
return self.name
class DataSource(models.Model):
"""A data source from a third party."""
title = models.CharField(max_length=100)
attribution = models.TextField(
help_text="The attribution as the author requested it.")
year = models.PositiveIntegerField()
license = models.ForeignKey(License)
url = models.URLField(blank=True, null=True)
def __unicode__(self):
return self.title
class DataLayer(gmodels.Model):
"""Any external data that has a geometry that can be added to the map."""
name = gmodels.CharField(max_length=200)
description = gmodels.TextField(null=True, blank=True)
added = gmodels.DateTimeField(auto_now_add=True)
source = gmodels.ForeignKey(DataSource)
shape = gmodels.GeometryField()
info = hstore.DictionaryField(
null=True, blank=True,
help_text="Any supplementary data for this shape.")
| openwater/h2o-really | supplements/models.py | Python | unlicense | 1,549 |
#!/usr/bin/env python
#This is free and unencumbered software released into the public domain.
#Anyone is free to copy, modify, publish, use, compile, sell, or
#distribute this software, either in source code form or as a compiled
#binary, for any purpose, commercial or non-commercial, and by any
#means.
#In jurisdictions that recognize copyright laws, the author or authors
#of this software dedicate any and all copyright interest in the
#software to the public domain. We make this dedication for the benefit
#of the public at large and to the detriment of our heirs and
#successors. We intend this dedication to be an overt act of
#relinquishment in perpetuity of all present and future rights to this
#software under copyright law.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
#OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
#ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
#For more information, please refer to <http://unlicense.org/>
#This is a python program that will open up a random url from a list
#It was a weird project for me to come up with
#Developer: Matthew Deig
#Date: 2014-11-03
import webbrowser, random
from optparse import OptionParser
def argParser():
#this will return 2 objects
#the options and args
parser = OptionParser()
parser.add_option("-a", "--add", dest="newurl", help="Adds a new url to the list", metavar="NEWURL")
parser.add_option("-q", "--nobrowser", action="store_true", dest="nobrowser", help="Does not open a broswer just prints the url out")
return parser.parse_args()
def openBroswer(url):
webbrowser.open_new_tab(url)
def addUrl(url):
file = open("url.db", "a")
file.write(url+"\n")
file.close()
def readUrls():
file = open("url.db", "r")
urls = file.readlines()
file.close()
return urls
def pickUrl(urls):
    #this is probably not very random but it is going to work
return random.choice(urls)
if __name__ == "__main__":
(options, args) = argParser()
if options.newurl is not None:
addUrl(options.newurl)
else:
if(options.nobrowser):
print(pickUrl(readUrls()))
else:
openBroswer(pickUrl(readUrls()))
| notptr/randURL | ranurl.py | Python | unlicense | 2,503 |
#!/usr/bin/env python
"""Ensure that invalid files (non-episodes) are not renamed
"""
from functional_runner import run_tvnamer, verify_out_data
from helpers import attr
@attr("functional")
def test_simple_single_file():
"""Boring example
"""
out_data = run_tvnamer(
with_files = ['Some File.avi'],
with_flags = ["--batch"])
expected_files = ['Some File.avi']
verify_out_data(out_data, expected_files, expected_returncode = 2)
@attr("functional")
def test_no_series_name():
"""File without series name should be skipped (unless '--name=MySeries' arg is supplied)
"""
out_data = run_tvnamer(
with_files = ['s01e01 Some File.avi'],
with_flags = ["--batch"])
expected_files = ['s01e01 Some File.avi']
verify_out_data(out_data, expected_files, expected_returncode = 2)
@attr("functional")
def test_ambigious():
invalid_files = [
        'show.123.avi', # Maybe s01e23 but too ambiguous. #140
        'Scrubs.0101.avi', # Ambiguous. #140
]
for f in invalid_files:
out_data = run_tvnamer(
with_files = [f],
with_flags = ["--batch"])
expected_files = [f]
verify_out_data(out_data, expected_files, expected_returncode = 2)
| dbr/tvnamer | tests/test_invalid_files.py | Python | unlicense | 1,264 |
#!/usr/bin/env python
import datetime
import sys
import numpy
import math
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import LogNorm
import data
KEYFILE = os.path.expanduser('~/.ssh/livedatakey')
WEBSPACE = '[email protected]:/var/www/html/ipeek'
WORKDIR="/tmp/"
def getLiveData(datafile):
path = "/net/charlotte/var/ftp/pub/sansdata/"+datafile
sansfile="[email protected]:%s"%path
workfile=WORKDIR+"livedata."+datafile[:3].lower()
cmd = ("scp -p -i %s %s %s"
% (KEYFILE, sansfile, workfile))
#print cmd
os.system(cmd)
return workfile
def putLiveData(imgname,htmlname,rawdata=None):
files = [WORKDIR+f for f in (imgname, htmlname)]
if rawdata is not None: files.append(rawdata)
# assuming no spaces in filenames
cmd = "scp -p -i %s %s %s"% (KEYFILE, " ".join(files), WEBSPACE)
#print cmd
os.system(cmd)
def createLiveHTML(metadata,imgname,htmlname):
collen=16.258
if (metadata['sample.table'] == '1'):
offset=0.55
else:
offset=0
dataimage = imgname
outfile = open(htmlname,"w")
print >> outfile, """
<?
include("/var/www/include/utility.inc")
?>
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%s status</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<meta http-equiv="refresh" content="30" />
<link rel="stylesheet" href="http://www.ncnr.nist.gov/programs/sans/scripts/style.css" type="text/css">
<script src="http://www.ncnr.nist.gov/programs/sans/scripts/java_scripts.js" type="text/javascript"></script>
</head>
<body>
<div class="bannerback">
<div class="bannerleft"><a href="http://www.ncnr.nist.gov/"><img src="/images/ncnr_banner_title_2.gif" alt="NIST Center for Neutron Research Logo"></a></div>
<div class="bannerright"><a href="http://www.nist.gov/"><img src="/images/ncnr_banner_nist_name.gif" alt="NIST Logo"></a></div>
</div>
<div class="nav">
<ul>
<li><a href="http://www.ncnr.nist.gov/index.html">NCNR Home</a></li>
<li><a href="http://www.ncnr.nist.gov/instruments/index.html">Instruments</a></li>
<li><a href="http://www.ncnr.nist.gov/programs/index.html">Science</a></li>
<li><a href="http://www.ncnr.nist.gov/experiments.html">Experiments</a></li>
<li><a href="http://www.ncnr.nist.gov/sitemap.html">Sitemap</a></li>
</ul>
</div>
<div class="container">
<div align='center'><font size='Large'>%s : %s</font></div>
<div align='center'><img src="%s" alt="SANS pattern"></div>
<table align='center' border=0>
<tr valign='top'><td>
<table border=1>
<tr><td width='100px'>Date/Time</td><td>%s</td>
<tr><td>Count Time</td><td>%s s</td>
<tr><td>Elapsed Count Time</td><td>%s s</td>
<tr><td>Monitor Counts</td><td>%d</td>
<tr><td>Detector Counts</td><td>%d</td>
</table>
</td><td>
<table border=1>
<tr><td>Guides</td><td>%d</td>
<tr><td>SDD</td><td>%.1f m</td>
<tr><td>Lambda</td><td>%.1f A</td>
<tr><td>Source Aperture</td><td>%.1f cm</td>
<tr><td>Sample Aperture</td><td>%.1f cm</td>
</table>
</td><td>
<table border=1>
<tr><td>Sample Position</td><td>%.1f</td>
<tr><td>Sample Temperature</td><td>%.1f C</td>
</table>
</td></tr>
</table>
<div align='center'>The data is updated every 5 minutes and this page will refresh every 30s</div>
<div align='center'>Last data update: %s</div>
<div align='center'>Last page refresh:
<script type="text/javascript" language="JavaScript">
var date = new Date();
document.write(date.toString())
</script>
</div>
<?
include("/var/www/html/programs/sans/SANS_bottom1.inc");
?> """ % (imgname[:3],
metadata['run.defdir'],
metadata['sample.labl'],
dataimage,
metadata['run.datetime'],
metadata['run.ctime']*metadata['run.npre'],
metadata['run.rtime'],
metadata['run.moncnt'],
metadata['run.detcnt'],
round((collen-metadata['resolution.ap12dis']-offset)/1.55),
metadata['det.dis'],
metadata['resolution.lmda'],
metadata['resolution.ap1'],
metadata['resolution.ap2'],
metadata['sample.position'],
metadata['sample.temp'],
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
outfile.close()
def generateDataImage(data,metadata,imgname):
fig = plt.figure(figsize=(8,4))
ax = fig.add_subplot(121)
ax.imshow(data,origin='lower',cmap=cm.jet)
ax2 = fig.add_subplot(122)
vmax = data.max()
if vmax < 1: vmax=1
ax2.imshow(data,origin='lower',cmap=cm.jet,norm=LogNorm(vmin=1, vmax=vmax))
#plt.colorbar(data,ax=ax2,norm=LogNorm(vmin=1))
fig.savefig(imgname)
def run_update(inst=None):
imgname = inst+"_livedata.png"
htmlname = inst+"_livedata.html"
datafile = inst[:3]+"Current/live001.sa3_ice_a001"
localfile = getLiveData(datafile)
detdata,metadata = data.readNCNRData(localfile)
generateDataImage(detdata,metadata,WORKDIR+imgname)
createLiveHTML(metadata,imgname,WORKDIR+htmlname)
putLiveData(imgname,htmlname,rawdata=localfile)
if __name__ == "__main__":
inst = sys.argv[1] if len(sys.argv) == 2 else None
run_update(inst)
| scattering/ipeek | server/sans/livesans.py | Python | unlicense | 5,262 |
__source__ = 'https://leetcode.com/problems/knight-dialer/'
# Time: O(N)
# Space: O(1)
#
# Description: Leetcode # 935. Knight Dialer
#
# A chess knight can move as indicated in the chess diagram below:
#
# This time, we place our chess knight on any numbered key of a phone pad (indicated above),
# and the knight makes N-1 hops. Each hop must be from one key to another numbered key.
#
# Each time it lands on a key (including the initial placement of the knight),
# it presses the number of that key, pressing N digits total.
#
# How many distinct numbers can you dial in this manner?
#
# Since the answer may be large, output the answer modulo 10^9 + 7.
#
# Example 1:
#
# Input: 1
# Output: 10
# Example 2:
#
# Input: 2
# Output: 20
# Example 3:
#
# Input: 3
# Output: 46
#
# Note:
#
# 1 <= N <= 5000
#
import unittest
# 908ms 59.71%
class Solution(object):
def knightDialer(self, N):
"""
:type N: int
:rtype: int
"""
MOD = 10**9 + 7
moves = [[4,6],[6,8],[7,9],[4,8],[3,9,0],[],
[1,7,0],[2,6],[1,3],[2,4]]
dp = [1] * 10
for hops in xrange(N-1):
dp2 = [0] * 10
for node, count in enumerate(dp):
for nei in moves[node]:
dp2[nei] += count
dp2[nei] %= MOD
dp = dp2
return sum(dp) % MOD
# Time: O(logN) using the power of the adjacency matrix
# https://math.stackexchange.com/questions/1890620/finding-path-lengths-by-the-power-of-adjacency-matrix-of-an-undirected-graph
# https://leetcode.com/problems/knight-dialer/discuss/189252/O(logN)
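# Sketch of the idea: M below is the adjacency matrix of the knight-move graph
# on the keypad, entry (i, j) of M**k counts the k-hop key sequences from i to
# j, so summing every entry of M**(N-1) counts all N-digit dialings.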
# 64ms 98.74%
import numpy as np
class Solution2(object):
def knightDialer(self, N):
mod = 10**9 + 7
if N == 1: return 10
M = np.matrix([[0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0]])
res, N = 1, N - 1
while N:
if N % 2: res = res * M % mod
M = M * M % mod
N /= 2
return int(np.sum(res)) % mod
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/knight-dialer/solution/
# Best explanation: https://leetcode.com/problems/knight-dialer/discuss/190787/How-to-solve-this-problem-explained-for-noobs!!!
Approach 1: Dynamic Programming
Complexity Analysis
Time Complexity: O(N)
Space Complexity: O(1)
DP: "the total number of unique paths to (i, j) for certain hops n
is equal to the sum of total number of unique paths to each valid position
from which (i, j) can be reached using n - 1 hops".
# 47ms 59.39%
class Solution {
public int knightDialer(int N) {
int MOD = 1_000_000_007;
int[][] moves = new int[][]{
{4,6},{6,8},{7,9},{4,8},{3,9,0},
{},{1,7,0},{2,6},{1,3},{2,4}
};
int[][] dp = new int[2][10];
Arrays.fill(dp[0], 1);
for (int hops = 0; hops < N -1; ++hops) {
Arrays.fill(dp[~hops & 1], 0);
for (int node = 0; node < 10; ++node) {
for (int nei : moves[node]) {
dp[~hops & 1][nei] += dp[hops & 1][node];
dp[~hops & 1][nei] %= MOD;
}
}
}
long ans = 0;
for (int x : dp[~N & 1]) ans += x;
return (int) (ans % MOD);
}
}
# Memorization
# 5ms 99.93%
class Solution {
private static final int MOD = 1_000_000_007;
private static final int[][] dp = new int[5001][10];
private static final int[][] moves = {{4, 6}, {6, 8}, {7, 9}, {4, 8}, {3, 9, 0}, {}, {1, 7, 0},{2, 6}, {1, 3}, {2, 4}};
public int knightDialer(int N) {
int res = 0;
for (int i = 0; i < 10; i++) {
res = (res + helper(N, i)) % MOD;
}
return res;
}
private int helper(int N, int digit) {
if (N == 1) return 1;
if (digit == 5) return 0;
if (dp[N][digit] > 0) return dp[N][digit];
for (int next : moves[digit]) {
dp[N][digit] = (dp[N][digit] + helper(N -1, next)) % MOD;
}
return dp[N][digit];
}
}
''' | JulyKikuAkita/PythonPrac | cs15211/KnightDialer.py | Python | apache-2.0 | 4,651 |
import sys
from tasks import TaskExecutionError
from templates import TaskTemplateResolver, TemplateKeyError
from config import DatamakeConfig
import runner
import json
import utils
def parse_args(args):
try:
import argparse
return parse_args_with_argparse(args)
except ImportError:
import optparse
return parse_args_with_optparse(args)
def parse_args_with_argparse(args):
import argparse
parser = argparse.ArgumentParser(description='Run datamake task flow.')
parser.add_argument('task_id', metavar='task_id', type=str, help='target task to be run')
parser.add_argument('config_files', metavar='config_file', type=str, nargs='+',
help='task config files')
parser.add_argument('--param', dest='parameters', action='append',
help='specify KEY=VALUE parameter that will override parameters on all tasks')
parser.add_argument('--eval-param', dest='eval_parameters', action='append',
help='specify KEY=VALUE parameter that will override parameters on all tasks. VALUE will be replaced by eval(VALUE) in python. If the eval output is a list, the task flow will be executed per value.')
parser.add_argument('--showgraph', dest='showgraph', action='store_true',
help='print the task dependency graph but don\'t run tasks.')
parser.add_argument('--dryrun', dest='dryrun', action='store_true',
help='print all tasks and if they are pending but do not execute them')
parser.add_argument('--delete-artifacts', dest='delete_artifacts', action='store_true',
help='beware! deletes all artifacts in the flow!')
return parser.parse_args(args)
def parse_args_with_optparse(args):
import optparse
usage = """usage: %prog [-h] [--param PARAMETERS] [--eval-param EVAL_PARAMETERS]
[--showgraph] [--dryrun] [--delete-artifacts]
task_id config_file [config_file ...]"""
parser = optparse.OptionParser(usage=usage)
parser.add_option('--param', dest='parameters', action='append',
help='specify KEY=VALUE parameter that will override parameters on all tasks')
parser.add_option('--eval-param', dest='eval_parameters', action='append',
help='specify KEY=VALUE parameter that will override parameters on all tasks. VALUE will be replaced by eval(VALUE) in python. If the eval output is a list, the task flow will be executed per value.')
parser.add_option('--showgraph', dest='showgraph', action='store_true',
help='print the task dependency graph but don\'t run tasks')
parser.add_option('--dryrun', dest='dryrun', action='store_true',
help='print all tasks and if they are pending but do not execute them')
parser.add_option('--delete-artifacts', dest='delete_artifacts', action='store_true',
help='beware! deletes all artifacts in the flow!')
(options, remaining) = parser.parse_args()
if len(remaining) < 2:
print "Not enough arguments, need: task_id config_files | [config_file]"
options.task_id = remaining[0]
options.config_files = remaining[1:]
return options
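# Example invocation (task and file names are purely illustrative):
#   python datamake.py --param ENV=prod --eval-param "DAY=range(1, 4)" \
#       daily tasks.json
# --eval-param values go through eval(); per the option help above, when the
# result is a list the flow is executed once for each value.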
def run_tasks(task_runner, pending_tasks):
try:
for task_id in pending_tasks:
task_runner.run_task(task_id)
return 0
except TaskExecutionError, e:
print "Error while executing task ", task_id
print e.task.tuple()
print e.message
task_runner.abort_pending_tasks()
return 1
finally:
task_runner.delete_artifacts(pending_tasks)
def dry_run_tasks(task_runner, pending_tasks):
print "Starting dry run"
for task_id in pending_tasks:
task = task_runner.get_task(task_id)
if task.command:
print "command:", task.command
if task.artifact:
print "artifact:",task.artifact.uri()
print "Dry run complete"
return 0
def get_config(config_filename):
config = DatamakeConfig()
config.load_from_file(config_filename)
return config
def get_template_resolver(configs, override_parameters={}):
task_template_sets = [config.task_templates(override_parameters) for config in configs]
flattened_task_templates = [item for sublist in task_template_sets for item in sublist]
template_resolver = TaskTemplateResolver(flattened_task_templates)
return template_resolver
def main():
args = parse_args(sys.argv[1:])
task_id = args.task_id
parameters = dict(param.split('=') for param in args.parameters) if args.parameters else {}
override_parameters_list = []
if args.eval_parameters:
evaled_parameters_list = list(utils.evalulate_parameters(dict(param.split('=') for param in args.eval_parameters)))
for evaled_parameters in evaled_parameters_list:
override_parameters = dict(evaled_parameters)
override_parameters.update(parameters)
override_parameters_list.append(override_parameters)
else:
override_parameters_list = [parameters]
if (len(args.config_files) > 1) and not ('.' in task_id):
print "task_id must be namespaced (eg. namespace.task) when multiple config files are used. You provided '%s'" % task_id
return 1
configs = [get_config(filename) for filename in args.config_files]
exit_status = 0
for override_parameters in override_parameters_list:
template_resolver = get_template_resolver(configs, override_parameters)
try:
task_graph = template_resolver.resolve_task_graph(task_id)
except TemplateKeyError, e:
print e
exit_status = 1
break
task_runner = runner.Runner(task_id, task_graph)
if args.showgraph:
print "Task graph:"
exit_status = task_runner.show_graph()
break
print "Starting Flow"
print "Override params: %s" % json.dumps(override_parameters, indent=True)
pending_tasks = task_runner.get_pending_execution_order()
print "Task status:"
task_runner.print_all_task_status()
print "Trimming tasks..."
print "Pending tasks"
task_runner.print_task_status(pending_tasks)
if args.dryrun:
exit_status += dry_run_tasks(task_runner, pending_tasks)
elif args.delete_artifacts:
print "Forcing removal of existing artifacts"
task_runner.delete_all_artifacts(force=True)
else:
exit_status += run_tasks(task_runner, pending_tasks)
print "Final status"
task_runner.print_all_task_status()
print
if exit_status:
break
print "FAILED" if exit_status else "SUCCESS"
return exit_status
if __name__ == '__main__':
main()
| tims/datamake | datamake/datamake.py | Python | apache-2.0 | 6,497 |
from setuptools import setup, find_packages
setup(
name = "scorecard",
version = "0.1",
packages = find_packages()
)
| opme/SurgeonScorecard | python/setup.py | Python | apache-2.0 | 135 |
import inspect
import typing as t
from functools import wraps
from .utils import _PassArg
from .utils import pass_eval_context
V = t.TypeVar("V")
def async_variant(normal_func): # type: ignore
def decorator(async_func): # type: ignore
pass_arg = _PassArg.from_obj(normal_func)
need_eval_context = pass_arg is None
if pass_arg is _PassArg.environment:
def is_async(args: t.Any) -> bool:
return t.cast(bool, args[0].is_async)
else:
def is_async(args: t.Any) -> bool:
return t.cast(bool, args[0].environment.is_async)
@wraps(normal_func)
def wrapper(*args, **kwargs): # type: ignore
b = is_async(args)
if need_eval_context:
args = args[1:]
if b:
return async_func(*args, **kwargs)
return normal_func(*args, **kwargs)
if need_eval_context:
wrapper = pass_eval_context(wrapper)
wrapper.jinja_async_variant = True
return wrapper
return decorator
async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V":
if inspect.isawaitable(value):
return await t.cast("t.Awaitable[V]", value)
return t.cast("V", value)
async def auto_aiter(
iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> "t.AsyncIterator[V]":
if hasattr(iterable, "__aiter__"):
async for item in t.cast("t.AsyncIterable[V]", iterable):
yield item
else:
for item in t.cast("t.Iterable[V]", iterable):
yield item
async def auto_to_list(
value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]",
) -> t.List["V"]:
return [x async for x in auto_aiter(value)]
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/jinja2/async_utils.py | Python | apache-2.0 | 1,751 |
import pytest
from insights.tests import context_wrap
from insights.parsers import ParseException
from insights.parsers.mongod_conf import MongodbConf
NORMAL_CONF = """
# mongodb.conf - generated from Puppet
#where to log
logpath=/var/log/mongodb/mongodb.log
logappend=true
# Set this option to configure the mongod or mongos process to bind to and
# listen for connections from applications on this address.
# You may concatenate a list of comma separated values to bind mongod to multiple IP addresses.
bind_ip = 127.0.0.1
# fork and run in background
fork=true
dbpath=/var/lib/mongodb
# location of pidfile
pidfilepath=/var/run/mongodb/mongodb.pid
# Enables journaling
journal = true
# Turn on/off security. Off is currently the default
noauth=true
abc=
""".strip()
NORMAL_CONF_V1 = """
=/var/log/mongodb/mongodb.log
logappend=true # noauth=true
""".strip()
YAML_CONF = """
# mongod.conf
# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/
# where to write logging data.
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/mongod.log
# Where and how to store data.
storage:
dbPath: /var/lib/mongo
journal:
enabled: true
# engine:
# mmapv1:
# wiredTiger:
# how the process runs
processManagement:
fork: true # fork and run in background
pidFilePath: /var/run/mongodb/mongod.pid # location of pidfile
# network interfaces
net:
port: 27017
#bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces.
#bindIp: 127.0.0.1 # Listen to local interface only, comment to listen on all interfaces.
#security:
#operationProfiling:
#replication:
#sharding:
## Enterprise-Only Options
#auditLog:
#snmp:
""".strip()
YAML_CONF_UNPARSABLE = """
systemLog:
destination: file
logAppend: true
port=27017
""".strip()
def test_mongodb_conf():
result = MongodbConf(context_wrap(YAML_CONF))
assert result.get("security") is None
assert result.get("processManagement") == {
'fork': True,
'pidFilePath': '/var/run/mongodb/mongod.pid'}
assert result.is_yaml is True
assert result.port == 27017
assert result.bindip is None
assert result.dbpath == '/var/lib/mongo'
assert result.fork is True
assert result.pidfilepath == '/var/run/mongodb/mongod.pid'
assert result.syslog == 'file'
assert result.logpath == '/var/log/mongodb/mongod.log'
result = MongodbConf(context_wrap(NORMAL_CONF))
assert result.is_yaml is False
assert result.port is None
assert result.bindip == '127.0.0.1'
assert result.dbpath == '/var/lib/mongodb'
assert result.fork == 'true'
assert result.pidfilepath == '/var/run/mongodb/mongodb.pid'
assert result.syslog is None
assert result.logpath == '/var/log/mongodb/mongodb.log'
assert result.get("abc") == ''
assert result.get("def") is None
result = MongodbConf(context_wrap(NORMAL_CONF_V1))
assert result.is_yaml is False
assert len(result.data) == 2
assert result.get("logappend") == 'true'
with pytest.raises(ParseException) as e:
MongodbConf(context_wrap(YAML_CONF_UNPARSABLE))
assert "mongod conf parse failed:" in str(e.value)
| wcmitchell/insights-core | insights/parsers/tests/test_mongod_conf.py | Python | apache-2.0 | 3,241 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import filecmp
import os
import random
import tempfile
import time
import sys
import testtools
import mock
import mox
import glanceclient.exc
from oslo.config import cfg
from nova import context
from nova import exception
from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
from nova.tests import matchers
from nova import utils
import nova.virt.libvirt.utils as lv_utils
CONF = cfg.CONF
class NullWriter(object):
"""Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
class TestGlanceSerializer(test.NoDBTestCase):
def test_serialize(self):
metadata = {'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings': [
{'virtual': 'aaa',
'device': 'bbb'},
{'virtual': 'xxx',
'device': 'yyy'}],
'block_device_mapping': [
{'virtual_device': 'fake',
'device_name': '/dev/fake'},
{'virtual_device': 'ephemeral0',
'device_name': '/dev/fake0'}]}}
converted_expected = {
'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings':
'[{"device": "bbb", "virtual": "aaa"}, '
'{"device": "yyy", "virtual": "xxx"}]',
'block_device_mapping':
'[{"virtual_device": "fake", "device_name": "/dev/fake"}, '
'{"virtual_device": "ephemeral0", '
'"device_name": "/dev/fake0"}]'}}
converted = glance._convert_to_string(metadata)
self.assertEqual(converted, converted_expected)
self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGlanceImageService(test.NoDBTestCase):
"""
Tests the Glance image service.
At a high level, the translations involved are:
1. Glance -> ImageService - This is needed so we can support
       multiple ImageServices (Glance, Local, etc)
    2. ImageService -> API - This is needed so we can support multiple
APIs (OpenStack, EC2)
"""
NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
def setUp(self):
super(TestGlanceImageService, self).setUp()
fakes.stub_out_compute_api_snapshot(self.stubs)
self.client = glance_stubs.StubGlanceClient()
self.service = self._create_image_service(self.client)
self.context = context.RequestContext('fake', 'fake', auth_token=True)
self.mox = mox.Mox()
self.files_to_clean = []
def tearDown(self):
super(TestGlanceImageService, self).tearDown()
self.mox.UnsetStubs()
for f in self.files_to_clean:
try:
os.unlink(f)
except os.error:
pass
def _get_tempfile(self):
(outfd, config_filename) = tempfile.mkstemp(prefix='nova_glance_tests')
self.files_to_clean.append(config_filename)
return (outfd, config_filename)
def _create_image_service(self, client):
def _fake_create_glance_client(context, host, port, use_ssl, version):
return client
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client_wrapper = glance.GlanceClientWrapper(
'fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)
@staticmethod
def _make_fixture(**kwargs):
fixture = {'name': None,
'properties': {},
'status': None,
'is_public': None}
fixture.update(kwargs)
return fixture
def _make_datetime_fixture(self):
return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
updated_at=self.NOW_GLANCE_FORMAT,
deleted_at=self.NOW_GLANCE_FORMAT)
def test_create_with_instance_id(self):
# Ensure instance_id is persisted as an image-property.
fixture = {'name': 'test image',
'is_public': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
expected = {
'id': image_id,
'name': 'test image',
'is_public': False,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {'instance_id': '42', 'user_id': 'fake'},
'owner': None,
}
self.assertThat(image_meta, matchers.DictMatches(expected))
image_metas = self.service.detail(self.context)
self.assertThat(image_metas[0], matchers.DictMatches(expected))
def test_create_without_instance_id(self):
"""
Ensure we can create an image without having to specify an
instance_id. Public images are an example of an image not tied to an
instance.
"""
fixture = {'name': 'test image', 'is_public': False}
image_id = self.service.create(self.context, fixture)['id']
expected = {
'id': image_id,
'name': 'test image',
'is_public': False,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
}
actual = self.service.show(self.context, image_id)
self.assertThat(actual, matchers.DictMatches(expected))
def test_create(self):
fixture = self._make_fixture(name='test image')
num_images = len(self.service.detail(self.context))
image_id = self.service.create(self.context, fixture)['id']
self.assertIsNotNone(image_id)
self.assertEqual(num_images + 1,
len(self.service.detail(self.context)))
def test_create_and_show_non_existing_image(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
self.assertIsNotNone(image_id)
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
'bad image id')
def test_detail_private_image(self):
fixture = self._make_fixture(name='test image')
fixture['is_public'] = False
properties = {'owner_id': 'proj1'}
fixture['properties'] = properties
self.service.create(self.context, fixture)['id']
proj = self.context.project_id
self.context.project_id = 'proj1'
image_metas = self.service.detail(self.context)
self.context.project_id = proj
self.assertEqual(1, len(image_metas))
self.assertEqual(image_metas[0]['name'], 'test image')
self.assertEqual(image_metas[0]['is_public'], False)
def test_detail_marker(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, marker=ids[1])
self.assertEqual(len(image_metas), 8)
i = 2
for meta in image_metas:
expected = {
'id': ids[i],
'status': None,
'is_public': None,
'name': 'TestImage %d' % (i),
'properties': {},
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'owner': None,
}
self.assertThat(meta, matchers.DictMatches(expected))
i = i + 1
def test_detail_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, limit=5)
self.assertEqual(len(image_metas), 5)
def test_page_size(self):
with mock.patch.object(glance.GlanceClientWrapper, 'call') as a_mock:
self.service.detail(self.context, page_size=5)
self.assertEqual(a_mock.called, True)
a_mock.assert_called_with(self.context, 1, 'list',
filters={'is_public': 'none'},
page_size=5)
def test_detail_default_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context)
for i, meta in enumerate(image_metas):
self.assertEqual(meta['name'], 'TestImage %d' % (i))
def test_detail_marker_and_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, marker=ids[3], limit=5)
self.assertEqual(len(image_metas), 5)
i = 4
for meta in image_metas:
expected = {
'id': ids[i],
'status': None,
'is_public': None,
'name': 'TestImage %d' % (i),
'properties': {},
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'owner': None,
}
self.assertThat(meta, matchers.DictMatches(expected))
i = i + 1
def test_detail_invalid_marker(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
self.assertRaises(exception.Invalid, self.service.detail,
self.context, marker='invalidmarker')
def test_update(self):
fixture = self._make_fixture(name='test image')
image = self.service.create(self.context, fixture)
image_id = image['id']
fixture['name'] = 'new image name'
self.service.update(self.context, image_id, fixture)
new_image_data = self.service.show(self.context, image_id)
self.assertEqual('new image name', new_image_data['name'])
def test_delete(self):
fixture1 = self._make_fixture(name='test image 1')
fixture2 = self._make_fixture(name='test image 2')
fixtures = [fixture1, fixture2]
num_images = len(self.service.detail(self.context))
self.assertEqual(0, num_images)
ids = []
for fixture in fixtures:
new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.detail(self.context))
self.assertEqual(2, num_images)
self.service.delete(self.context, ids[0])
# When you delete an image from glance, it sets the status to DELETED
# and doesn't actually remove the image.
# Check the image is still there.
num_images = len(self.service.detail(self.context))
self.assertEqual(2, num_images)
# Check the image is marked as deleted.
num_images = reduce(lambda x, y: x + (0 if y['deleted'] else 1),
self.service.detail(self.context), 0)
self.assertEqual(1, num_images)
def test_show_passes_through_to_client(self):
fixture = self._make_fixture(name='image1', is_public=True)
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
expected = {
'id': image_id,
'name': 'image1',
'is_public': True,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
}
self.assertEqual(image_meta, expected)
def test_show_raises_when_no_authtoken_in_the_context(self):
fixture = self._make_fixture(name='image1',
is_public=False,
properties={'one': 'two'})
image_id = self.service.create(self.context, fixture)['id']
self.context.auth_token = False
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
image_id)
def test_detail_passes_through_to_client(self):
fixture = self._make_fixture(name='image10', is_public=True)
image_id = self.service.create(self.context, fixture)['id']
image_metas = self.service.detail(self.context)
expected = [
{
'id': image_id,
'name': 'image10',
'is_public': True,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
},
]
self.assertEqual(image_metas, expected)
def test_show_makes_datetimes(self):
fixture = self._make_datetime_fixture()
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
def test_detail_makes_datetimes(self):
fixture = self._make_datetime_fixture()
self.service.create(self.context, fixture)
image_meta = self.service.detail(self.context)[0]
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
def test_download_with_retries(self):
tries = [0]
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
if tries[0] == 0:
tries[0] = 1
raise glanceclient.exc.ServiceUnavailable('')
else:
return {}
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
self.assertRaises(exception.GlanceConnectionFailed,
service.download, self.context, image_id, data=writer)
# Now let's enable retries. No exception should happen now.
tries = [0]
self.flags(glance_num_retries=1)
service.download(self.context, image_id, data=writer)
def test_download_file_url(self):
self.flags(allowed_direct_url_schemes=['file'])
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that returns a file url."""
(outfd, s_tmpfname) = tempfile.mkstemp(prefix='directURLsrc')
outf = os.fdopen(outfd, 'w')
inf = open('/dev/urandom', 'r')
for i in range(10):
_data = inf.read(1024)
outf.write(_data)
outf.close()
def get(self, image_id):
return type('GlanceTestDirectUrlMeta', (object,),
{'direct_url': 'file://%s' % self.s_tmpfname})
client = MyGlanceStubClient()
(outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst')
os.close(outfd)
service = self._create_image_service(client)
image_id = 1 # doesn't matter
service.download(self.context, image_id, dst_path=tmpfname)
# compare the two files
rc = filecmp.cmp(tmpfname, client.s_tmpfname)
self.assertTrue(rc, "The file %s and %s should be the same" %
(tmpfname, client.s_tmpfname))
os.remove(client.s_tmpfname)
os.remove(tmpfname)
def test_download_module_filesystem_match(self):
mountpoint = '/'
fs_id = 'someid'
desc = {'id': fs_id, 'mountpoint': mountpoint}
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [
{'url': 'file:///' + os.devnull,
'metadata': desc}]})
def data(self, image_id):
self.outer_test.fail('This should not be called because the '
'transfer module should have intercepted '
'it.')
self.mox.StubOutWithMock(lv_utils, 'copy_image')
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
service = self._create_image_service(client)
#NOTE(Jbresnah) The following options must be added after the module
# has added the specific groups.
self.flags(group='image_file_url:gluster', id=fs_id)
self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
dest_file = os.devnull
lv_utils.copy_image(mox.IgnoreArg(), dest_file)
self.mox.ReplayAll()
service.download(self.context, image_id, dst_path=dest_file)
self.mox.VerifyAll()
def test_download_module_no_filesystem_match(self):
mountpoint = '/'
fs_id = 'someid'
desc = {'id': fs_id, 'mountpoint': mountpoint}
some_data = "sfxvdwjer"
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [
{'url': 'file:///' + os.devnull,
'metadata': desc}]})
def data(self, image_id):
return some_data
def _fake_copyfile(source, dest):
self.fail('This should not be called because a match should not '
'have been found.')
self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
service = self._create_image_service(client)
#NOTE(Jbresnah) The following options must be added after the module
# has added the specific groups.
self.flags(group='image_file_url:gluster', id='someotherid')
self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
service.download(self.context, image_id,
dst_path=os.devnull,
data=None)
def test_download_module_mountpoints(self):
glance_mount = '/glance/mount/point'
_, data_filename = self._get_tempfile()
nova_mount = os.path.dirname(data_filename)
source_path = os.path.basename(data_filename)
file_url = 'file://%s' % os.path.join(glance_mount, source_path)
file_system_id = 'test_FS_ID'
file_system_desc = {'id': file_system_id, 'mountpoint': glance_mount}
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [{'url': file_url,
'metadata': file_system_desc}]})
def data(self, image_id):
self.outer_test.fail('This should not be called because the '
'transfer module should have intercepted '
'it.')
self.copy_called = False
def _fake_copyfile(source, dest):
self.assertEqual(source, data_filename)
self.copy_called = True
self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
service = self._create_image_service(client)
self.flags(group='image_file_url:gluster', id=file_system_id)
self.flags(group='image_file_url:gluster', mountpoint=nova_mount)
service.download(self.context, image_id, dst_path=os.devnull)
self.assertTrue(self.copy_called)
def test_download_module_file_bad_module(self):
_, data_filename = self._get_tempfile()
file_url = 'applesauce://%s' % data_filename
data_called = False
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
data_called = False
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [{'url': file_url,
'metadata': {}}]})
def data(self, image_id):
self.data_called = True
return "someData"
self.flags(allowed_direct_url_schemes=['applesauce'])
self.mox.StubOutWithMock(lv_utils, 'copy_image')
self.flags(allowed_direct_url_schemes=['file'])
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
service = self._create_image_service(client)
# By not calling copyfileobj in the file download module, we verify
# that the requirements for using it were not met.
self.mox.ReplayAll()
service.download(self.context, image_id, dst_path=os.devnull)
self.mox.VerifyAll()
self.assertTrue(client.data_called)
def test_client_forbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a Forbidden exception."""
def get(self, image_id):
raise glanceclient.exc.Forbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPForbidden exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPForbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_notfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a NotFound exception."""
def get(self, image_id):
raise glanceclient.exc.NotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_httpnotfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPNotFound exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPNotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, dst_path=os.devnull)
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
(service, same_id) = glance.get_remote_image_service(
self.context, image_id)
self.assertEqual(same_id, image_id)
def test_glance_client_image_ref(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
image_url = 'http://something-less-likely/%s' % image_id
(service, same_id) = glance.get_remote_image_service(
self.context, image_url)
self.assertEqual(same_id, image_id)
self.assertEqual(service._client.host, 'something-less-likely')
def _create_failing_glance_client(info):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
info['num_calls'] += 1
if info['num_calls'] == 1:
raise glanceclient.exc.ServiceUnavailable('')
return {}
return MyGlanceStubClient()
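# _create_failing_glance_client is shared by the retry tests below; the info
# dict passed in is mutated (info['num_calls']) so callers can assert how many
# attempts the client wrapper made.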
class TestGlanceClientWrapper(test.NoDBTestCase):
def setUp(self):
super(TestGlanceClientWrapper, self).setUp()
# host1 has no scheme, which is http by default
self.flags(glance_api_servers=['host1:9292', 'https://host2:9293',
'http://host3:9294'])
# Make the test run fast
def _fake_sleep(secs):
pass
self.stubs.Set(time, 'sleep', _fake_sleep)
def test_headers_passed_glanceclient(self):
auth_token = 'auth_token'
ctxt = context.RequestContext('fake', 'fake', auth_token=auth_token)
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
def _get_fake_glanceclient(version, endpoint, **params):
fake_client = glance_stubs.StubGlanceClient(version,
endpoint, **params)
self.assertIsNotNone(fake_client.auth_token)
self.assertIsNotNone(fake_client.identity_headers)
self.assertEqual(fake_client.identity_header['X-Auth_Token'],
auth_token)
self.assertEqual(fake_client.identity_header['X-User-Id'], 'fake')
self.assertIsNone(fake_client.identity_header['X-Roles'])
self.assertIsNone(fake_client.identity_header['X-Tenant-Id'])
self.assertIsNone(fake_client.identity_header['X-Service-Catalog'])
self.assertEqual(fake_client.
identity_header['X-Identity-Status'],
'Confirmed')
self.stubs.Set(glanceclient.Client, '__init__',
_get_fake_glanceclient)
glance._create_glance_client(ctxt, fake_host, fake_port, fake_use_ssl)
def test_static_client_without_retries(self):
self.flags(glance_num_retries=0)
ctxt = context.RequestContext('fake', 'fake')
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
info = {'num_calls': 0}
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, fake_host)
self.assertEqual(port, fake_port)
self.assertEqual(use_ssl, fake_use_ssl)
return _create_failing_glance_client(info)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper(context=ctxt,
host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
def test_default_client_without_retries(self):
self.flags(glance_num_retries=0)
ctxt = context.RequestContext('fake', 'fake')
info = {'num_calls': 0,
'host': 'host1',
'port': 9292,
'use_ssl': False}
# Leave the list in a known-order
def _fake_shuffle(servers):
pass
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, info['host'])
self.assertEqual(port, info['port'])
self.assertEqual(use_ssl, info['use_ssl'])
return _create_failing_glance_client(info)
self.stubs.Set(random, 'shuffle', _fake_shuffle)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper()
client2 = glance.GlanceClientWrapper()
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
info = {'num_calls': 0,
'host': 'host2',
'port': 9293,
'use_ssl': True}
def _fake_shuffle2(servers):
# fake shuffle in a known manner
servers.append(servers.pop(0))
self.stubs.Set(random, 'shuffle', _fake_shuffle2)
self.assertRaises(exception.GlanceConnectionFailed,
client2.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
def test_static_client_with_retries(self):
self.flags(glance_num_retries=1)
ctxt = context.RequestContext('fake', 'fake')
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
info = {'num_calls': 0}
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, fake_host)
self.assertEqual(port, fake_port)
self.assertEqual(use_ssl, fake_use_ssl)
return _create_failing_glance_client(info)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper(context=ctxt,
host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
client.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
def test_default_client_with_retries(self):
self.flags(glance_num_retries=1)
ctxt = context.RequestContext('fake', 'fake')
info = {'num_calls': 0,
'host0': 'host1',
'port0': 9292,
'use_ssl0': False,
'host1': 'host2',
'port1': 9293,
'use_ssl1': True}
# Leave the list in a known-order
def _fake_shuffle(servers):
pass
def _fake_create_glance_client(context, host, port, use_ssl, version):
attempt = info['num_calls']
self.assertEqual(host, info['host%s' % attempt])
self.assertEqual(port, info['port%s' % attempt])
self.assertEqual(use_ssl, info['use_ssl%s' % attempt])
return _create_failing_glance_client(info)
self.stubs.Set(random, 'shuffle', _fake_shuffle)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper()
client2 = glance.GlanceClientWrapper()
client.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
def _fake_shuffle2(servers):
# fake shuffle in a known manner
servers.append(servers.pop(0))
self.stubs.Set(random, 'shuffle', _fake_shuffle2)
info = {'num_calls': 0,
'host0': 'host2',
'port0': 9293,
'use_ssl0': True,
'host1': 'host3',
'port1': 9294,
'use_ssl1': False}
client2.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
class TestGlanceUrl(test.NoDBTestCase):
def test_generate_glance_http_url(self):
generated_url = glance.generate_glance_url()
glance_host = CONF.glance_host
# ipv6 address, need to wrap it with '[]'
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
http_url = "http://%s:%d" % (glance_host, CONF.glance_port)
self.assertEqual(generated_url, http_url)
def test_generate_glance_https_url(self):
self.flags(glance_protocol="https")
generated_url = glance.generate_glance_url()
glance_host = CONF.glance_host
# ipv6 address, need to wrap it with '[]'
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
https_url = "https://%s:%d" % (glance_host, CONF.glance_port)
self.assertEqual(generated_url, https_url)
class TestGlanceApiServers(test.TestCase):
def test_get_ipv4_api_servers(self):
self.flags(glance_api_servers=['10.0.1.1:9292',
'https://10.0.0.1:9293',
'http://10.0.2.2:9294'])
glance_host = ['10.0.1.1', '10.0.0.1',
'10.0.2.2']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
# Python 2.6 cannot parse IPv6 addresses correctly
@testtools.skipIf(sys.version_info < (2, 7), "py27 or greater only")
def test_get_ipv6_api_servers(self):
self.flags(glance_api_servers=['[2001:2012:1:f101::1]:9292',
'https://[2010:2013:1:f122::1]:9293',
'http://[2001:2011:1:f111::1]:9294'])
glance_host = ['2001:2012:1:f101::1', '2010:2013:1:f122::1',
'2001:2011:1:f111::1']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
class TestUpdateGlanceImage(test.NoDBTestCase):
def test_start(self):
consumer = glance.UpdateGlanceImage(
'context', 'id', 'metadata', 'stream')
image_service = self.mox.CreateMock(glance.GlanceImageService)
self.mox.StubOutWithMock(glance, 'get_remote_image_service')
glance.get_remote_image_service(
'context', 'id').AndReturn((image_service, 'image_id'))
image_service.update(
'context', 'image_id', 'metadata', 'stream', purge_props=False)
self.mox.ReplayAll()
consumer.start()
| sacharya/nova | nova/tests/image/test_glance.py | Python | apache-2.0 | 38,559 |
# Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import asset_v1
from google.protobuf.json_format import MessageToDict
import click
import time
import json
def all_iam_policies(org_id, as_dict=True):
policies = []
scope = "organizations/{org_id}".format(org_id=org_id)
click.secho(
"Fetching IAM Policies from CAI API using scope {scope}".format(
scope=scope
)
)
client = asset_v1.AssetServiceClient()
response = client.search_all_iam_policies(
request={
"scope": scope,
"page_size": 500,
}
)
for policy in response:
if as_dict:
policies.append(MessageToDict(policy.__class__.pb(policy)))
else:
policies.append(policy)
return policies
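# A minimal usage sketch (the org id shown is a placeholder, not a real
# organization):
#
#   policies = all_iam_policies("123456789012")
#   print("Fetched {n} IAM policy results".format(n=len(policies)))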
def fetch_cai_file(org_id):
if not org_id:
raise SystemExit(
"ERROR: No org id provided. Set the ORG_ID environment variable or pass the --org-id parameter."
)
all_policies = all_iam_policies(org_id)
timestamp = int(time.time())
filename = "cai-iam-{timestamp}.json".format(timestamp=timestamp)
f = open(filename, "w")
json.dump(all_policies, f)
click.secho("Created inventory file {filename}.".format(filename=filename))
return filename
| GoogleCloudPlatform/professional-services | tools/iam-permissions-copier/inventory/cai.py | Python | apache-2.0 | 1851 |
"""
visit https://morvanzhou.github.io/tutorials/ for more!
Build two networks.
1. Without batch normalization
2. With batch normalization
Run tests on these two networks.
"""
# 23 Batch Normalization
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
ACTIVATION = tf.nn.relu
N_LAYERS = 7
N_HIDDEN_UNITS = 30
def fix_seed(seed=1):
# reproducible
np.random.seed(seed)
tf.set_random_seed(seed)
def plot_his(inputs, inputs_norm):
# plot histogram for the inputs of every layer
for j, all_inputs in enumerate([inputs, inputs_norm]):
for i, input in enumerate(all_inputs):
plt.subplot(2, len(all_inputs), j*len(all_inputs)+(i+1))
plt.cla()
if i == 0:
the_range = (-7, 10)
else:
the_range = (-1, 1)
plt.hist(input.ravel(), bins=15, range=the_range, color='#FF5733')
plt.yticks(())
if j == 1:
plt.xticks(the_range)
else:
plt.xticks(())
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.title("%s normalizing" % ("Without" if j == 0 else "With"))
plt.draw()
plt.pause(0.01)
def built_net(xs, ys, norm):
def add_layer(inputs, in_size, out_size, activation_function=None, norm=False):
# weights and biases (bad initialization for this case)
Weights = tf.Variable(tf.random_normal([in_size, out_size], mean=0., stddev=1.))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
# fully connected product
Wx_plus_b = tf.matmul(inputs, Weights) + biases
# normalize fully connected product
if norm:
# Batch Normalize
fc_mean, fc_var = tf.nn.moments(
Wx_plus_b,
axes=[0], # the dimension you wanna normalize, here [0] for batch
# for image, you wanna do [0, 1, 2] for [batch, height, width] but not channel
)
scale = tf.Variable(tf.ones([out_size]))
shift = tf.Variable(tf.zeros([out_size]))
epsilon = 0.001
# apply moving average for mean and var when training on a batch
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([fc_mean, fc_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(fc_mean), tf.identity(fc_var)
mean, var = mean_var_with_update()
Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)
# similar to these two steps:
# Wx_plus_b = (Wx_plus_b - fc_mean) / tf.sqrt(fc_var + 0.001)
# Wx_plus_b = Wx_plus_b * scale + shift
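# NOTE: this demo only ever runs in training mode, so the per-batch
# statistics are used directly; at inference time one would typically feed
# the moving averages tracked by `ema` (ema.average(fc_mean),
# ema.average(fc_var)) into tf.nn.batch_normalization instead.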
# activation
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
return outputs
fix_seed(1)
if norm:
# BN for the first input
fc_mean, fc_var = tf.nn.moments(
xs,
axes=[0],
)
scale = tf.Variable(tf.ones([1]))
shift = tf.Variable(tf.zeros([1]))
epsilon = 0.001
# apply moving average for mean and var when training on a batch
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([fc_mean, fc_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(fc_mean), tf.identity(fc_var)
mean, var = mean_var_with_update()
xs = tf.nn.batch_normalization(xs, mean, var, shift, scale, epsilon)
# record inputs for every layer
layers_inputs = [xs]
# build hidden layers
for l_n in range(N_LAYERS):
layer_input = layers_inputs[l_n]
in_size = layers_inputs[l_n].get_shape()[1].value
output = add_layer(
layer_input, # input
in_size, # input size
N_HIDDEN_UNITS, # output size
ACTIVATION, # activation function
norm, # normalize before activation
)
layers_inputs.append(output) # add output for next run
# build output layer
prediction = add_layer(layers_inputs[-1], 30, 1, activation_function=None)
cost = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
return [train_op, cost, layers_inputs]
# make up data
fix_seed(1)
x_data = np.linspace(-7, 10, 2500)[:, np.newaxis]
np.random.shuffle(x_data)
noise = np.random.normal(0, 8, x_data.shape)
y_data = np.square(x_data) - 5 + noise
# plot input data
plt.scatter(x_data, y_data)
plt.show()
xs = tf.placeholder(tf.float32, [None, 1]) # [num_samples, num_features]
ys = tf.placeholder(tf.float32, [None, 1])
train_op, cost, layers_inputs = built_net(xs, ys, norm=False) # without BN
train_op_norm, cost_norm, layers_inputs_norm = built_net(xs, ys, norm=True) # with BN
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# record cost
cost_his = []
cost_his_norm = []
record_step = 5
plt.ion()
plt.figure(figsize=(7, 3))
for i in range(250):
if i % 50 == 0:
# plot histogram
all_inputs, all_inputs_norm = sess.run([layers_inputs, layers_inputs_norm], feed_dict={xs: x_data, ys: y_data})
plot_his(all_inputs, all_inputs_norm)
# train on batch
sess.run([train_op, train_op_norm], feed_dict={xs: x_data[i*10:i*10+10], ys: y_data[i*10:i*10+10]})
if i % record_step == 0:
# record cost
cost_his.append(sess.run(cost, feed_dict={xs: x_data, ys: y_data}))
cost_his_norm.append(sess.run(cost_norm, feed_dict={xs: x_data, ys: y_data}))
plt.ioff()
plt.figure()
plt.plot(np.arange(len(cost_his))*record_step, np.array(cost_his), label='no BN') # no norm
plt.plot(np.arange(len(cost_his))*record_step, np.array(cost_his_norm), label='BN') # norm
plt.legend()
plt.show()
| del680202/MachineLearning-memo | src/tensorflow/BN.py | Python | apache-2.0 | 6,157 |
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
from setuptools import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png",
"*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]]
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
long_description = readme_note + fobj.read()
install_requires = [
'tornado>=4.0,<5',
'python-daemon<3.0',
]
if os.environ.get('READTHEDOCS', None) == 'True':
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
install_requires.append('sqlalchemy')
# readthedocs doesn't like python-daemon, see #1342
install_requires.remove('python-daemon<3.0')
install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py
setup(
name='luigi',
version='2.3.3patched1',
description='Workflow mgmt + task scheduling + dependency resolution',
long_description=long_description,
author='Erik Bernhardsson',
url='https://github.com/spotify/luigi',
license='Apache License 2.0',
packages=[
'luigi',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
'luigi-deps-tree = luigi.tools.deps_tree:main',
'luigi-migrate = luigi.tools.migrate:main'
]
},
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
)
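# Building or installing this patched package follows standard setuptools
# usage, e.g. (sketch): "pip install ." from the repository root, or
# "python setup.py sdist" to produce a source distribution.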
| Viktor-Evst/fixed-luigi | setup.py | Python | apache-2.0 | 3,100 |
#!/usr/bin/env python
import unittest
from socket import AF_INET6
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint
from vpp_pg_interface import is_ipv6_misc
from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, VppIpMRoute, \
VppMRoutePath, MRouteItfFlags, MRouteEntryFlags
from vpp_neighbor import find_nbr, VppNeighbor
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q
from scapy.layers.inet6 import IPv6, UDP, ICMPv6ND_NS, ICMPv6ND_RS, \
ICMPv6ND_RA, ICMPv6NDOptSrcLLAddr, getmacbyip6, ICMPv6MRD_Solicitation, \
ICMPv6NDOptMTU, ICMPv6NDOptSrcLLAddr, ICMPv6NDOptPrefixInfo, \
ICMPv6ND_NA, ICMPv6NDOptDstLLAddr, ICMPv6DestUnreach, icmp6types
from util import ppp
from scapy.utils6 import in6_getnsma, in6_getnsmac, in6_ptop, in6_islladdr, \
in6_mactoifaceid, in6_ismaddr
from scapy.utils import inet_pton, inet_ntop
def mk_ll_addr(mac):
euid = in6_mactoifaceid(mac)
addr = "fe80::" + euid
return addr
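# mk_ll_addr builds the EUI-64 style link-local address for a MAC:
# in6_mactoifaceid inserts ff:fe in the middle of the MAC and flips the
# universal/local bit, and the result is prefixed with fe80:: to give the
# address a host would auto-configure for that MAC.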
class TestIPv6ND(VppTestCase):
def validate_ra(self, intf, rx, dst_ip=None):
if not dst_ip:
dst_ip = intf.remote_ip6
# unicasted packets must come to the unicast mac
self.assertEqual(rx[Ether].dst, intf.remote_mac)
# and from the router's MAC
self.assertEqual(rx[Ether].src, intf.local_mac)
# the rx'd RA should be addressed to the sender's source
self.assertTrue(rx.haslayer(ICMPv6ND_RA))
self.assertEqual(in6_ptop(rx[IPv6].dst),
in6_ptop(dst_ip))
# and come from the router's link local
self.assertTrue(in6_islladdr(rx[IPv6].src))
self.assertEqual(in6_ptop(rx[IPv6].src),
in6_ptop(mk_ll_addr(intf.local_mac)))
def validate_na(self, intf, rx, dst_ip=None, tgt_ip=None):
if not dst_ip:
dst_ip = intf.remote_ip6
if not tgt_ip:
tgt_ip = intf.local_ip6
# unicasted packets must come to the unicast mac
self.assertEqual(rx[Ether].dst, intf.remote_mac)
# and from the router's MAC
self.assertEqual(rx[Ether].src, intf.local_mac)
# the rx'd NA should be addressed to the sender's source
self.assertTrue(rx.haslayer(ICMPv6ND_NA))
self.assertEqual(in6_ptop(rx[IPv6].dst),
in6_ptop(dst_ip))
# and come from the target address
self.assertEqual(in6_ptop(rx[IPv6].src), in6_ptop(tgt_ip))
# Dest link-layer options should have the router's MAC
dll = rx[ICMPv6NDOptDstLLAddr]
self.assertEqual(dll.lladdr, intf.local_mac)
def send_and_expect_ra(self, intf, pkts, remark, dst_ip=None,
filter_out_fn=is_ipv6_misc):
intf.add_stream(pkts)
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = intf.get_capture(1, filter_out_fn=filter_out_fn)
self.assertEqual(len(rx), 1)
rx = rx[0]
self.validate_ra(intf, rx, dst_ip)
def send_and_assert_no_replies(self, intf, pkts, remark):
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
intf.assert_nothing_captured(remark=remark)
class TestIPv6(TestIPv6ND):
""" IPv6 Test Case """
@classmethod
def setUpClass(cls):
super(TestIPv6, cls).setUpClass()
def setUp(self):
"""
Perform test setup before test case.
**Config:**
- create 3 pg interfaces
- untagged pg0 interface
- Dot1Q subinterface on pg1
- Dot1AD subinterface on pg2
- setup interfaces:
- put it into UP state
- set IPv6 addresses
- resolve neighbor address using NDP
- configure 200 fib entries
:ivar list interfaces: pg interfaces and subinterfaces.
:ivar dict flows: IPv6 packet flows in test.
:ivar list pg_if_packet_sizes: packet sizes in test.
*TODO:* Create AD sub interface
"""
super(TestIPv6, self).setUp()
# create 3 pg interfaces
self.create_pg_interfaces(range(3))
# create 2 subinterfaces for p1 and pg2
self.sub_interfaces = [
VppDot1QSubint(self, self.pg1, 100),
VppDot1QSubint(self, self.pg2, 200)
# TODO: VppDot1ADSubint(self, self.pg2, 200, 300, 400)
]
# packet flows mapping pg0 -> pg1.sub, pg2.sub, etc.
self.flows = dict()
self.flows[self.pg0] = [self.pg1.sub_if, self.pg2.sub_if]
self.flows[self.pg1.sub_if] = [self.pg0, self.pg2.sub_if]
self.flows[self.pg2.sub_if] = [self.pg0, self.pg1.sub_if]
# packet sizes
self.pg_if_packet_sizes = [64, 512, 1518, 9018]
self.sub_if_packet_sizes = [64, 512, 1518 + 4, 9018 + 4]
self.interfaces = list(self.pg_interfaces)
self.interfaces.extend(self.sub_interfaces)
# setup all interfaces
for i in self.interfaces:
i.admin_up()
i.config_ip6()
i.resolve_ndp()
# config 200 FIB entries
self.config_fib_entries(200)
def tearDown(self):
"""Run standard test teardown and log ``show ip6 neighbors``."""
for i in self.sub_interfaces:
i.unconfig_ip6()
i.ip6_disable()
i.admin_down()
i.remove_vpp_config()
super(TestIPv6, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.cli("show ip6 neighbors"))
# info(self.vapi.cli("show ip6 fib")) # many entries
def config_fib_entries(self, count):
"""For each interface add to the FIB table *count* routes to
"fd02::1/128" destination with interface's local address as next-hop
address.
:param int count: Number of FIB entries.
- *TODO:* check if the next-hop address shouldn't be remote address
instead of local address.
"""
n_int = len(self.interfaces)
percent = 0
counter = 0.0
dest_addr = inet_pton(AF_INET6, "fd02::1")
dest_addr_len = 128
for i in self.interfaces:
next_hop_address = i.local_ip6n
for j in range(count / n_int):
self.vapi.ip_add_del_route(
dest_addr, dest_addr_len, next_hop_address, is_ipv6=1)
counter += 1
if counter / count * 100 > percent:
self.logger.info("Configure %d FIB entries .. %d%% done" %
(count, percent))
percent += 1
def create_stream(self, src_if, packet_sizes):
"""Create input packet stream for defined interface.
:param VppInterface src_if: Interface to create packet stream for.
:param list packet_sizes: Required packet sizes.
"""
pkts = []
for i in range(0, 257):
dst_if = self.flows[src_if][i % 2]
info = self.create_packet_info(src_if, dst_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
IPv6(src=src_if.remote_ip6, dst=dst_if.remote_ip6) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
if isinstance(src_if, VppSubInterface):
p = src_if.add_dot1_layer(p)
size = packet_sizes[(i // 2) % len(packet_sizes)]
self.extend_packet(p, size)
pkts.append(p)
return pkts
def verify_capture(self, dst_if, capture):
"""Verify captured input packet stream for defined interface.
:param VppInterface dst_if: Interface to verify captured packet stream
for.
:param list capture: Captured packet stream.
"""
self.logger.info("Verifying capture on interface %s" % dst_if.name)
last_info = dict()
for i in self.interfaces:
last_info[i.sw_if_index] = None
is_sub_if = False
dst_sw_if_index = dst_if.sw_if_index
if hasattr(dst_if, 'parent'):
is_sub_if = True
for packet in capture:
if is_sub_if:
# Check VLAN tags and Ethernet header
packet = dst_if.remove_dot1_layer(packet)
self.assertTrue(Dot1Q not in packet)
try:
ip = packet[IPv6]
udp = packet[UDP]
payload_info = self.payload_to_info(str(packet[Raw]))
packet_index = payload_info.index
self.assertEqual(payload_info.dst, dst_sw_if_index)
self.logger.debug(
"Got packet on port %s: src=%u (id=%u)" %
(dst_if.name, payload_info.src, packet_index))
next_info = self.get_next_packet_info_for_interface2(
payload_info.src, dst_sw_if_index,
last_info[payload_info.src])
last_info[payload_info.src] = next_info
self.assertTrue(next_info is not None)
self.assertEqual(packet_index, next_info.index)
saved_packet = next_info.data
# Check standard fields
self.assertEqual(ip.src, saved_packet[IPv6].src)
self.assertEqual(ip.dst, saved_packet[IPv6].dst)
self.assertEqual(udp.sport, saved_packet[UDP].sport)
self.assertEqual(udp.dport, saved_packet[UDP].dport)
except:
self.logger.error(ppp("Unexpected or invalid packet:", packet))
raise
for i in self.interfaces:
remaining_packet = self.get_next_packet_info_for_interface2(
i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
self.assertTrue(remaining_packet is None,
"Interface %s: Packet expected from interface %s "
"didn't arrive" % (dst_if.name, i.name))
def test_fib(self):
""" IPv6 FIB test
Test scenario:
- Create IPv6 stream for pg0 interface
- Create IPv6 tagged streams for pg1's and pg2's subinterface.
- Send and verify received packets on each interface.
"""
pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes)
self.pg0.add_stream(pkts)
for i in self.sub_interfaces:
pkts = self.create_stream(i, self.sub_if_packet_sizes)
i.parent.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg0.get_capture()
self.verify_capture(self.pg0, pkts)
for i in self.sub_interfaces:
pkts = i.parent.get_capture()
self.verify_capture(i, pkts)
def test_ns(self):
""" IPv6 Neighbour Solicitation Exceptions
Test scenario:
- Send an NS sourced from an address not covered by the link sub-net
- Send an NS to an mcast address the router has not joined
- Send an NS for a target address the router does not own.
"""
#
# An NS from a non link source address
#
nsma = in6_getnsma(inet_pton(AF_INET6, self.pg0.local_ip6))
d = inet_ntop(AF_INET6, nsma)
p = (Ether(dst=in6_getnsmac(nsma)) /
IPv6(dst=d, src="2002::2") /
ICMPv6ND_NS(tgt=self.pg0.local_ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
pkts = [p]
self.send_and_assert_no_replies(
self.pg0, pkts,
"No response to NS source by address not on sub-net")
#
# An NS sent to a solicited mcast group the router is
# not a member of FAILS
#
if 0:
nsma = in6_getnsma(inet_pton(AF_INET6, "fd::ffff"))
d = inet_ntop(AF_INET6, nsma)
p = (Ether(dst=in6_getnsmac(nsma)) /
IPv6(dst=d, src=self.pg0.remote_ip6) /
ICMPv6ND_NS(tgt=self.pg0.local_ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
pkts = [p]
self.send_and_assert_no_replies(
self.pg0, pkts,
"No response to NS sent to unjoined mcast address")
#
# An NS whose target address is one the router does not own
#
nsma = in6_getnsma(inet_pton(AF_INET6, self.pg0.local_ip6))
d = inet_ntop(AF_INET6, nsma)
p = (Ether(dst=in6_getnsmac(nsma)) /
IPv6(dst=d, src=self.pg0.remote_ip6) /
ICMPv6ND_NS(tgt="fd::ffff") /
ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
pkts = [p]
self.send_and_assert_no_replies(self.pg0, pkts,
"No response to NS for unknown target")
#
# A neighbor entry that has no associated FIB-entry
#
self.pg0.generate_remote_hosts(4)
nd_entry = VppNeighbor(self,
self.pg0.sw_if_index,
self.pg0.remote_hosts[2].mac,
self.pg0.remote_hosts[2].ip6,
af=AF_INET6,
is_no_fib_entry=1)
nd_entry.add_vpp_config()
#
# check we have the neighbor, but no route
#
self.assertTrue(find_nbr(self,
self.pg0.sw_if_index,
self.pg0._remote_hosts[2].ip6,
inet=AF_INET6))
self.assertFalse(find_route(self,
self.pg0._remote_hosts[2].ip6,
128,
inet=AF_INET6))
def validate_ra(self, intf, rx, dst_ip=None, mtu=9000, pi_opt=None):
if not dst_ip:
dst_ip = intf.remote_ip6
# unicasted packets must come to the unicast mac
self.assertEqual(rx[Ether].dst, intf.remote_mac)
# and from the router's MAC
self.assertEqual(rx[Ether].src, intf.local_mac)
# the rx'd RA should be addressed to the sender's source
self.assertTrue(rx.haslayer(ICMPv6ND_RA))
self.assertEqual(in6_ptop(rx[IPv6].dst),
in6_ptop(dst_ip))
# and come from the router's link local
self.assertTrue(in6_islladdr(rx[IPv6].src))
self.assertEqual(in6_ptop(rx[IPv6].src),
in6_ptop(mk_ll_addr(intf.local_mac)))
# it should contain the links MTU
ra = rx[ICMPv6ND_RA]
self.assertEqual(ra[ICMPv6NDOptMTU].mtu, mtu)
# it should contain the source's link layer address option
sll = ra[ICMPv6NDOptSrcLLAddr]
self.assertEqual(sll.lladdr, intf.local_mac)
if not pi_opt:
# the RA should not contain prefix information
self.assertFalse(ra.haslayer(ICMPv6NDOptPrefixInfo))
else:
raos = rx.getlayer(ICMPv6NDOptPrefixInfo, 1)
# the options are nested in the scapy packet in a way that I cannot
# decipher how to decode. This 1st layer of options always returns
# nested classes, so a direct obj1 == obj2 comparison always fails.
# However, getlayer(.., 2) does give one instance,
# so we cheat here and construct a new opt instance for comparison.
rd = ICMPv6NDOptPrefixInfo(prefixlen=raos.prefixlen,
prefix=raos.prefix,
L=raos.L,
A=raos.A)
if type(pi_opt) is list:
for ii in range(len(pi_opt)):
self.assertEqual(pi_opt[ii], rd)
rd = rx.getlayer(ICMPv6NDOptPrefixInfo, ii+2)
else:
self.assertEqual(pi_opt, raos)
def send_and_expect_ra(self, intf, pkts, remark, dst_ip=None,
filter_out_fn=is_ipv6_misc,
opt=None):
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = intf.get_capture(1, filter_out_fn=filter_out_fn)
self.assertEqual(len(rx), 1)
rx = rx[0]
self.validate_ra(intf, rx, dst_ip, pi_opt=opt)
def test_rs(self):
""" IPv6 Router Solicitation Exceptions
Test scenario:
"""
#
# Before we begin change the IPv6 RA responses to use the unicast
# address - that way we will not confuse them with the periodic
# RAs which go to the mcast address
# Sit and wait for the first periodic RA.
#
# TODO
#
self.pg0.ip6_ra_config(send_unicast=1)
#
# An RS from a link source address
# - expect an RA in return
#
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IPv6(dst=self.pg0.local_ip6, src=self.pg0.remote_ip6) /
ICMPv6ND_RS())
pkts = [p]
self.send_and_expect_ra(self.pg0, pkts, "Genuine RS")
#
# For the next RS sent the RA should be rate limited
#
self.send_and_assert_no_replies(self.pg0, pkts, "RA rate limited")
#
# When we reconfigure the IPv6 RA config, we reset the RA rate limiting,
# so we need to do this before each test below so as not to drop
# packets for rate limiting reasons. Test this works here.
#
self.pg0.ip6_ra_config(send_unicast=1)
self.send_and_expect_ra(self.pg0, pkts, "Rate limit reset RS")
#
# An RS sent from a non-link local source
#
self.pg0.ip6_ra_config(send_unicast=1)
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IPv6(dst=self.pg0.local_ip6, src="2002::ffff") /
ICMPv6ND_RS())
pkts = [p]
self.send_and_assert_no_replies(self.pg0, pkts,
"RS from non-link source")
#
# Source an RS from a link local address
#
self.pg0.ip6_ra_config(send_unicast=1)
ll = mk_ll_addr(self.pg0.remote_mac)
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IPv6(dst=self.pg0.local_ip6, src=ll) /
ICMPv6ND_RS())
pkts = [p]
self.send_and_expect_ra(self.pg0, pkts,
"RS sourced from link-local",
dst_ip=ll)
#
# Send the RS multicast
#
self.pg0.ip6_ra_config(send_unicast=1)
dmac = in6_getnsmac(inet_pton(AF_INET6, "ff02::2"))
ll = mk_ll_addr(self.pg0.remote_mac)
p = (Ether(dst=dmac, src=self.pg0.remote_mac) /
IPv6(dst="ff02::2", src=ll) /
ICMPv6ND_RS())
pkts = [p]
self.send_and_expect_ra(self.pg0, pkts,
"RS sourced from link-local",
dst_ip=ll)
#
# Source from the unspecified address ::. This happens when the RS
# is sent before the host has a configured address/sub-net,
# i.e. auto-config. Since the sender has no IP address, the reply
# comes back mcast - so the capture needs to not filter this.
# If we happen to pick up the periodic RA at this point then so be it,
# it's not an error.
#
self.pg0.ip6_ra_config(send_unicast=1, suppress=1)
p = (Ether(dst=dmac, src=self.pg0.remote_mac) /
IPv6(dst="ff02::2", src="::") /
ICMPv6ND_RS())
pkts = [p]
self.send_and_expect_ra(self.pg0, pkts,
"RS sourced from unspecified",
dst_ip="ff02::1",
filter_out_fn=None)
#
# Configure The RA to announce the links prefix
#
self.pg0.ip6_ra_prefix(self.pg0.local_ip6n,
self.pg0.local_ip6_prefix_len)
#
# RAs should now contain the prefix information option
#
opt = ICMPv6NDOptPrefixInfo(prefixlen=self.pg0.local_ip6_prefix_len,
prefix=self.pg0.local_ip6,
L=1,
A=1)
self.pg0.ip6_ra_config(send_unicast=1)
ll = mk_ll_addr(self.pg0.remote_mac)
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IPv6(dst=self.pg0.local_ip6, src=ll) /
ICMPv6ND_RS())
self.send_and_expect_ra(self.pg0, p,
"RA with prefix-info",
dst_ip=ll,
opt=opt)
#
# Change the prefix info to off-link
# L-flag is clear
#
self.pg0.ip6_ra_prefix(self.pg0.local_ip6n,
self.pg0.local_ip6_prefix_len,
off_link=1)
opt = ICMPv6NDOptPrefixInfo(prefixlen=self.pg0.local_ip6_prefix_len,
prefix=self.pg0.local_ip6,
L=0,
A=1)
self.pg0.ip6_ra_config(send_unicast=1)
self.send_and_expect_ra(self.pg0, p,
"RA with Prefix info with L-flag=0",
dst_ip=ll,
opt=opt)
#
# Change the prefix info to off-link, no-autoconfig
# L and A flag are clear in the advert
#
self.pg0.ip6_ra_prefix(self.pg0.local_ip6n,
self.pg0.local_ip6_prefix_len,
off_link=1,
no_autoconfig=1)
opt = ICMPv6NDOptPrefixInfo(prefixlen=self.pg0.local_ip6_prefix_len,
prefix=self.pg0.local_ip6,
L=0,
A=0)
self.pg0.ip6_ra_config(send_unicast=1)
self.send_and_expect_ra(self.pg0, p,
"RA with Prefix info with A & L-flag=0",
dst_ip=ll,
opt=opt)
#
# Change the flag settings back to the defaults
# L and A flag are set in the advert
#
self.pg0.ip6_ra_prefix(self.pg0.local_ip6n,
self.pg0.local_ip6_prefix_len)
opt = ICMPv6NDOptPrefixInfo(prefixlen=self.pg0.local_ip6_prefix_len,
prefix=self.pg0.local_ip6,
L=1,
A=1)
self.pg0.ip6_ra_config(send_unicast=1)
self.send_and_expect_ra(self.pg0, p,
"RA with Prefix info",
dst_ip=ll,
opt=opt)
#
# Change the prefix info to off-link, no-autoconfig
# L and A flag are clear in the advert
#
self.pg0.ip6_ra_prefix(self.pg0.local_ip6n,
self.pg0.local_ip6_prefix_len,
off_link=1,
no_autoconfig=1)
opt = ICMPv6NDOptPrefixInfo(prefixlen=self.pg0.local_ip6_prefix_len,
prefix=self.pg0.local_ip6,
L=0,
A=0)
self.pg0.ip6_ra_config(send_unicast=1)
self.send_and_expect_ra(self.pg0, p,
"RA with Prefix info with A & L-flag=0",
dst_ip=ll,
opt=opt)
#
# Use the reset-to-defaults option to revert to defaults
# L and A flag are set in the advert
#
self.pg0.ip6_ra_prefix(self.pg0.local_ip6n,
self.pg0.local_ip6_prefix_len,
use_default=1)
opt = ICMPv6NDOptPrefixInfo(prefixlen=self.pg0.local_ip6_prefix_len,
prefix=self.pg0.local_ip6,
L=1,
A=1)
self.pg0.ip6_ra_config(send_unicast=1)
self.send_and_expect_ra(self.pg0, p,
"RA with Prefix reverted to defaults",
dst_ip=ll,
opt=opt)
#
# Advertise Another prefix. With no L-flag/A-flag
#
self.pg0.ip6_ra_prefix(self.pg1.local_ip6n,
self.pg1.local_ip6_prefix_len,
off_link=1,
no_autoconfig=1)
opt = [ICMPv6NDOptPrefixInfo(prefixlen=self.pg0.local_ip6_prefix_len,
prefix=self.pg0.local_ip6,
L=1,
A=1),
ICMPv6NDOptPrefixInfo(prefixlen=self.pg1.local_ip6_prefix_len,
prefix=self.pg1.local_ip6,
L=0,
A=0)]
self.pg0.ip6_ra_config(send_unicast=1)
ll = mk_ll_addr(self.pg0.remote_mac)
p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IPv6(dst=self.pg0.local_ip6, src=ll) /
ICMPv6ND_RS())
self.send_and_expect_ra(self.pg0, p,
"RA with multiple Prefix infos",
dst_ip=ll,
opt=opt)
#
# Remove the first prefix-info - expect the second is still in the
# advert
#
self.pg0.ip6_ra_prefix(self.pg0.local_ip6n,
self.pg0.local_ip6_prefix_len,
is_no=1)
opt = ICMPv6NDOptPrefixInfo(prefixlen=self.pg1.local_ip6_prefix_len,
prefix=self.pg1.local_ip6,
L=0,
A=0)
self.pg0.ip6_ra_config(send_unicast=1)
self.send_and_expect_ra(self.pg0, p,
"RA with Prefix reverted to defaults",
dst_ip=ll,
opt=opt)
#
# Remove the second prefix-info - expect no prefix-info in the adverts
#
self.pg0.ip6_ra_prefix(self.pg1.local_ip6n,
self.pg1.local_ip6_prefix_len,
is_no=1)
self.pg0.ip6_ra_config(send_unicast=1)
self.send_and_expect_ra(self.pg0, p,
"RA with Prefix reverted to defaults",
dst_ip=ll)
#
# Reset the periodic advertisements back to default values
#
self.pg0.ip6_ra_config(no=1, suppress=1, send_unicast=0)
class IPv6NDProxyTest(TestIPv6ND):
""" IPv6 ND ProxyTest Case """
def setUp(self):
super(IPv6NDProxyTest, self).setUp()
# create 3 pg interfaces
self.create_pg_interfaces(range(3))
# pg0 is the master interface, with the configured subnet
self.pg0.admin_up()
self.pg0.config_ip6()
self.pg0.resolve_ndp()
self.pg1.ip6_enable()
self.pg2.ip6_enable()
def tearDown(self):
super(IPv6NDProxyTest, self).tearDown()
def test_nd_proxy(self):
""" IPv6 Proxy ND """
#
# Generate some hosts in the subnet that we are proxying
#
self.pg0.generate_remote_hosts(8)
nsma = in6_getnsma(inet_pton(AF_INET6, self.pg0.local_ip6))
d = inet_ntop(AF_INET6, nsma)
#
# Send an NS for one of those remote hosts on one of the proxy links
# expect no response since it's from an address that is not
# on the link that has the prefix configured
#
ns_pg1 = (Ether(dst=in6_getnsmac(nsma), src=self.pg1.remote_mac) /
IPv6(dst=d, src=self.pg0._remote_hosts[2].ip6) /
ICMPv6ND_NS(tgt=self.pg0.local_ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=self.pg0._remote_hosts[2].mac))
self.send_and_assert_no_replies(self.pg1, ns_pg1, "Off link NS")
#
# Add proxy support for the host
#
self.vapi.ip6_nd_proxy(
inet_pton(AF_INET6, self.pg0._remote_hosts[2].ip6),
self.pg1.sw_if_index)
#
# try that NS again. this time we expect an NA back
#
self.pg1.add_stream(ns_pg1)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
self.validate_na(self.pg1, rx[0],
dst_ip=self.pg0._remote_hosts[2].ip6,
tgt_ip=self.pg0.local_ip6)
#
# ... and that we have an entry in the ND cache
#
self.assertTrue(find_nbr(self,
self.pg1.sw_if_index,
self.pg0._remote_hosts[2].ip6,
inet=AF_INET6))
#
# ... and we can route traffic to it
#
t = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
IPv6(dst=self.pg0._remote_hosts[2].ip6,
src=self.pg0.remote_ip6) /
UDP(sport=10000, dport=20000) /
Raw('\xa5' * 100))
self.pg0.add_stream(t)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
rx = rx[0]
self.assertEqual(rx[Ether].dst, self.pg0._remote_hosts[2].mac)
self.assertEqual(rx[Ether].src, self.pg1.local_mac)
self.assertEqual(rx[IPv6].src, t[IPv6].src)
self.assertEqual(rx[IPv6].dst, t[IPv6].dst)
#
# Test we proxy for the host on the main interface
#
ns_pg0 = (Ether(dst=in6_getnsmac(nsma), src=self.pg0.remote_mac) /
IPv6(dst=d, src=self.pg0.remote_ip6) /
ICMPv6ND_NS(tgt=self.pg0._remote_hosts[2].ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
self.pg0.add_stream(ns_pg0)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
self.validate_na(self.pg0, rx[0],
tgt_ip=self.pg0._remote_hosts[2].ip6,
dst_ip=self.pg0.remote_ip6)
#
# Setup and resolve proxy for another host on another interface
#
ns_pg2 = (Ether(dst=in6_getnsmac(nsma), src=self.pg2.remote_mac) /
IPv6(dst=d, src=self.pg0._remote_hosts[3].ip6) /
ICMPv6ND_NS(tgt=self.pg0.local_ip6) /
ICMPv6NDOptSrcLLAddr(lladdr=self.pg0._remote_hosts[2].mac))
self.vapi.ip6_nd_proxy(
inet_pton(AF_INET6, self.pg0._remote_hosts[3].ip6),
self.pg2.sw_if_index)
self.pg2.add_stream(ns_pg2)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg2.get_capture(1)
self.validate_na(self.pg2, rx[0],
dst_ip=self.pg0._remote_hosts[3].ip6,
tgt_ip=self.pg0.local_ip6)
self.assertTrue(find_nbr(self,
self.pg2.sw_if_index,
self.pg0._remote_hosts[3].ip6,
inet=AF_INET6))
#
# hosts can communicate. pg2->pg1
#
t2 = (Ether(dst=self.pg2.local_mac,
src=self.pg0.remote_hosts[3].mac) /
IPv6(dst=self.pg0._remote_hosts[2].ip6,
src=self.pg0._remote_hosts[3].ip6) /
UDP(sport=10000, dport=20000) /
Raw('\xa5' * 100))
self.pg2.add_stream(t2)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
rx = rx[0]
self.assertEqual(rx[Ether].dst, self.pg0._remote_hosts[2].mac)
self.assertEqual(rx[Ether].src, self.pg1.local_mac)
self.assertEqual(rx[IPv6].src, t2[IPv6].src)
self.assertEqual(rx[IPv6].dst, t2[IPv6].dst)
#
# remove the proxy configs
#
self.vapi.ip6_nd_proxy(
inet_pton(AF_INET6, self.pg0._remote_hosts[2].ip6),
self.pg1.sw_if_index,
is_del=1)
self.vapi.ip6_nd_proxy(
inet_pton(AF_INET6, self.pg0._remote_hosts[3].ip6),
self.pg2.sw_if_index,
is_del=1)
self.assertFalse(find_nbr(self,
self.pg2.sw_if_index,
self.pg0._remote_hosts[3].ip6,
inet=AF_INET6))
self.assertFalse(find_nbr(self,
self.pg1.sw_if_index,
self.pg0._remote_hosts[2].ip6,
inet=AF_INET6))
#
# no longer proxy-ing...
#
self.send_and_assert_no_replies(self.pg0, ns_pg0, "Proxy unconfigured")
self.send_and_assert_no_replies(self.pg1, ns_pg1, "Proxy unconfigured")
self.send_and_assert_no_replies(self.pg2, ns_pg2, "Proxy unconfigured")
#
# no longer forwarding. traffic generates NS out of the glean/main
# interface
#
self.pg2.add_stream(t2)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
self.assertTrue(rx[0].haslayer(ICMPv6ND_NS))
class TestIPNull(VppTestCase):
""" IPv6 routes via NULL """
def setUp(self):
super(TestIPNull, self).setUp()
# create 1 pg interface
self.create_pg_interfaces(range(1))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip6()
i.resolve_ndp()
def tearDown(self):
super(TestIPNull, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip6()
i.admin_down()
def test_ip_null(self):
""" IP NULL route """
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IPv6(src=self.pg0.remote_ip6, dst="2001::1") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# A route via IP NULL that will reply with ICMP unreachables
#
ip_unreach = VppIpRoute(self, "2001::", 64, [], is_unreach=1, is_ip6=1)
ip_unreach.add_vpp_config()
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
rx = rx[0]
icmp = rx[ICMPv6DestUnreach]
# 0 = "No route to destination"
self.assertEqual(icmp.code, 0)
# ICMP is rate limited. pause a bit
self.sleep(1)
#
# A route via IP NULL that will reply with ICMP prohibited
#
ip_prohibit = VppIpRoute(self, "2001::1", 128, [],
is_prohibit=1, is_ip6=1)
ip_prohibit.add_vpp_config()
self.pg0.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
rx = rx[0]
icmp = rx[ICMPv6DestUnreach]
# 1 = "Communication with destination administratively prohibited"
self.assertEqual(icmp.code, 1)
class TestIPDisabled(VppTestCase):
""" IPv6 disabled """
def setUp(self):
super(TestIPDisabled, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(2))
# PG0 is IP enabled
self.pg0.admin_up()
self.pg0.config_ip6()
self.pg0.resolve_ndp()
# PG 1 is not IP enabled
self.pg1.admin_up()
def tearDown(self):
super(TestIPDisabled, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip6()
i.admin_down()
def send_and_assert_no_replies(self, intf, pkts, remark):
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
for i in self.pg_interfaces:
i.get_capture(0)
i.assert_nothing_captured(remark=remark)
def test_ip_disabled(self):
""" IP Disabled """
#
# A (*,G) multicast route.
# one accepting interface, pg1, one forwarding interface, pg0
#
route_ff_01 = VppIpMRoute(
self,
"::",
"ffef::1", 128,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_ff_01.add_vpp_config()
pu = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pm = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IPv6(src="2001::1", dst="ffef::1") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# PG1 does not forward IP traffic
#
self.send_and_assert_no_replies(self.pg1, pu, "IPv6 disabled")
self.send_and_assert_no_replies(self.pg1, pm, "IPv6 disabled")
#
# IP enable PG1
#
self.pg1.config_ip6()
#
# Now we get packets through
#
self.pg1.add_stream(pu)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
self.pg1.add_stream(pm)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
#
# Disable PG1
#
self.pg1.unconfig_ip6()
#
# PG1 does not forward IP traffic
#
self.send_and_assert_no_replies(self.pg1, pu, "IPv6 disabled")
self.send_and_assert_no_replies(self.pg1, pm, "IPv6 disabled")
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| milanlenco/vpp | test/test_ip6.py | Python | apache-2.0 | 38,904 |
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing Melange Django settings.
"""
__authors__ = [
'"Madhusudan.C.S" <[email protected]>',
'"Augie Fackler" <[email protected]>',
'"Sverre Rabbelier" <[email protected]>',
'"Lennard de Rijk" <[email protected]>',
'"Pawel Solyga" <[email protected]>',
]
import os
# Debug flag True only on App Engine development environment (dev_appserver.py)
# dev_appserver sets SERVER_SOFTWARE to 'Development/1.0'
DEBUG = os.environ['SERVER_SOFTWARE'].startswith('Dev')
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_ENGINE = 'dummy'
# None of the following are used with appengine
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
# Set to empty string for localhost. Not used with sqlite3.
DATABASE_HOST = ''
# Set to empty string for default. Not used with sqlite3.
DATABASE_PORT = ''
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
# The order of the middleware is as follows because:
# - The ValueStore middleware should be before any other middleware
# so that the value store is available to it.
# - The ExceptionHandler should be the outermost handler (after the
# ValueStore) so as to catch as many errors as possible.
# - The Profile middleware should be as far out as possible, so that
#   as many function calls as possible are profiled, but it cannot be
#   before the ExceptionHandler (so as to catch exceptions thrown by it).
# - The MaintenanceMiddleware should be after the Profiler, since we
#   do want its actions profiled.
MIDDLEWARE_CLASSES = (
'google.appengine.ext.appstats.recording.AppStatsDjangoMiddleware',
'soc.middleware.value_store.ValueStoreMiddleware',
# 'soc.middleware.exception_handler.ExceptionHandlerMiddleware',
# 'soc.middleware.profiler.ProfileMiddleware',
'soc.middleware.maintenance.MaintenanceMiddleware',
'soc.middleware.blobstore.BlobStoreMiddleware',
'soc.middleware.xsrf.XsrfMiddleware',
# 'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'urls'
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DIRS = (
# TODO(proto): customize the template search directories
os.path.join(ROOT_PATH, 'soc', 'templates'),
os.path.join(ROOT_PATH, 'shell', 'templates'),
)
INSTALLED_APPS = (
'soc.views.helper',
'soc.modules.gsoc.views.helper',
'soc.modules.gci.views.helper',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
)
GCI_TASK_QUOTA_LIMIT_ENABLED = False
MODULE_FMT = 'soc.modules.%s.callback'
MODULES = ['gsoc', 'statistic', 'gci']
| SRabbelier/Melange | app/settings.py | Python | apache-2.0 | 4,946 |
"""Base actions for the players to take."""
from csrv.model.actions import action
from csrv.model import cost
from csrv.model import errors
from csrv.model import events
from csrv.model import game_object
from csrv.model import parameters
class PlayOperationAction(action.Action):
DESCRIPTION = '[click]: Play an operation'
COST_CLASS = cost.OperationCost
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
action.Action.resolve(
self,
ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
self.player.hq.remove(self.card)
self.card.is_faceup = True
self.card.play()
self.card.log('The corp plays %s' % self.card)
self.player.archives.add(self.card)
@property
def description(self):
return 'Play %s' % self.card.NAME
| mrroach/CentralServer | csrv/model/actions/play_operation_action.py | Python | apache-2.0 | 825 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from collections import defaultdict
import ray
import ray.cloudpickle as cloudpickle
# This string should be identical to the name of the signal sent upon
# detecting that an actor died.
# This constant is also used in NodeManager::PublishActorStateTransition()
# in node_manager.cc
ACTOR_DIED_STR = "ACTOR_DIED_SIGNAL"
logger = logging.getLogger(__name__)
class Signal(object):
"""Base class for Ray signals."""
pass
class ErrorSignal(Signal):
"""Signal raised if an exception happens in a task or actor method."""
def __init__(self, error):
self.error = error
class ActorDiedSignal(Signal):
"""Signal raised if an exception happens in a task or actor method."""
def __init__(self):
pass
def _get_task_id(source):
"""Return the task id associated to the generic source of the signal.
Args:
source: source of the signal, it can be either an object id returned
by a task, a task id, or an actor handle.
Returns:
- If source is an object id, return the id of the task which created it.
- If source is an actor handle, return the id of the actor.
- If source is a task id, return the same task id.
"""
if type(source) is ray.actor.ActorHandle:
return source._actor_id
else:
if type(source) is ray.TaskID:
return source
else:
return ray._raylet.compute_task_id(source)
def send(signal):
"""Send signal.
The signal has a unique identifier that is computed from (1) the id
of the actor or task sending this signal (i.e., the actor or task calling
this function), and (2) an index that is incremented every time this
source sends a signal. This index starts from 1.
Args:
signal: Signal to be sent.
"""
if hasattr(ray.worker.global_worker, "actor_creation_task_id"):
source_key = ray.worker.global_worker.actor_id.hex()
else:
# No actors; this function must have been called from a task
source_key = ray.worker.global_worker.current_task_id.hex()
encoded_signal = ray.utils.binary_to_hex(cloudpickle.dumps(signal))
ray.worker.global_worker.redis_client.execute_command(
"XADD " + source_key + " * signal " + encoded_signal)
def receive(sources, timeout=None):
"""Get all outstanding signals from sources.
A source can be either (1) an object ID returned by the task (we want
to receive signals from), or (2) an actor handle.
When invoked by the same entity E (where E can be an actor, task or
driver), for each source S in sources, this function returns all signals
generated by S since the last receive() was invoked by E on S. If this is
the first call on S, this function returns all past signals generated by S
so far. Note that different actors, tasks or drivers that call receive()
on the same source S will get independent copies of the signals generated
by S.
Args:
sources: List of sources from which the caller waits for signals.
A source is either an object ID returned by a task (in this case
the object ID is used to identify that task), or an actor handle.
If the user passes the IDs of multiple objects returned by the
same task, this function returns a copy of the signals generated
by that task for each object ID.
timeout: Maximum time (in seconds) this function waits to get a signal
from a source in sources. If None, the timeout is infinite.
Returns:
A list of pairs (S, sig), where S is a source in the sources argument,
and sig is a signal generated by S since the last time receive()
was called on S. Thus, for each S in sources, the return list can
contain zero or multiple entries.
"""
# If None, initialize the timeout to a huge value (i.e., over 30,000 years
# in this case) to "approximate" infinity.
if timeout is None:
timeout = 10**12
if timeout < 0:
raise ValueError("The 'timeout' argument cannot be less than 0.")
if not hasattr(ray.worker.global_worker, "signal_counters"):
ray.worker.global_worker.signal_counters = defaultdict(lambda: b"0")
signal_counters = ray.worker.global_worker.signal_counters
# Map the ID of each source task to the source itself.
task_id_to_sources = defaultdict(lambda: [])
for s in sources:
task_id_to_sources[_get_task_id(s).hex()].append(s)
if timeout < 1e-3:
logger.warning("Timeout too small. Using 1ms minimum")
timeout = 1e-3
timeout_ms = int(1000 * timeout)
# Construct the redis query.
query = "XREAD BLOCK "
# redis expects ms.
query += str(timeout_ms)
query += " STREAMS "
query += " ".join([task_id for task_id in task_id_to_sources])
query += " "
query += " ".join([
ray.utils.decode(signal_counters[ray.utils.hex_to_binary(task_id)])
for task_id in task_id_to_sources
])
answers = ray.worker.global_worker.redis_client.execute_command(query)
if not answers:
return []
results = []
# Decoding is a little bit involved. Iterate through all the answers:
for i, answer in enumerate(answers):
# Make sure the answer corresponds to a source, s, in sources.
task_id = ray.utils.decode(answer[0])
task_source_list = task_id_to_sources[task_id]
# The list of results for source s is stored in answer[1]
for r in answer[1]:
for s in task_source_list:
if r[1][1].decode("ascii") == ACTOR_DIED_STR:
results.append((s, ActorDiedSignal()))
else:
# Now it gets tricky: r[0] is the redis internal sequence
# id
signal_counters[ray.utils.hex_to_binary(task_id)] = r[0]
# r[1] contains a list with elements (key, value), in our
# case we only have one key "signal" and the value is the
# signal.
signal = cloudpickle.loads(
ray.utils.hex_to_binary(r[1][1]))
results.append((s, signal))
return results
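# Illustrative sketch (not part of the original module), continuing the
# hypothetical work()/UserSignal example above:
#
#   oid = work.remote()
#   for source, sig in receive([oid], timeout=5):
#       if isinstance(sig, ErrorSignal):
#           print("task raised:", sig.error)
#
# Each signal is delivered at most once per caller; a later receive() on the
# same source only returns signals generated after this call.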
def forget(sources):
"""Ignore all previous signals associated with each source S in sources.
The index of the next expected signal from S is set to the index of
the last signal that S sent plus 1. This means that the next receive()
on S will only get the signals generated after this function was invoked.
Args:
sources: list of sources whose past signals are forgotten.
"""
# Just read all signals sent by all sources so far.
# This will results in ignoring these signals.
receive(sources, timeout=0)
def reset():
"""
Reset the worker state associated with any signals that this worker
has received so far.
If the worker calls receive() on a source next, it will get all the
signals generated by that source starting with index = 1.
"""
if hasattr(ray.worker.global_worker, "signal_counters"):
ray.worker.global_worker.signal_counters = defaultdict(lambda: b"0")
| ujvl/ray-ng | python/ray/experimental/signal.py | Python | apache-2.0 | 7,355 |
# Copyright 2022 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessors for T5 Tasks."""
# TODO(adarob): Move some of the more general preprocessors to seqio.
import collections
import functools
import math
import re
from typing import Callable, Mapping, Optional, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
# We disable no-value-for-parameter since the seqio.map_over_dataset leads to
# a false positive when seeds are provided.
# pylint:disable=no-value-for-parameter
AUTOTUNE = tf.data.experimental.AUTOTUNE
FeatureType = Mapping[str, tf.Tensor]
rekey = seqio.preprocessors.rekey
tokenize = seqio.preprocessors.tokenize
@seqio.map_over_dataset
def translate(x, source_language, target_language):
"""Convert a translation dataset to a text2text pair.
For example, say the dataset returns examples of this format:
{'de': 'Das ist gut.', 'en': 'That is good.'}
If source_language = 'de', target_language = 'en', then the outputs will have
the format:
{'inputs': 'translate German to English: Das ist gut.',
'targets': 'That is good.'}
Args:
x: an example to process.
source_language: source language code (e.g. 'en') to translate from.
target_language: target language code (e.g. 'de') to translate to.
Returns:
A preprocessed example with the format listed above.
"""
# Language codes like zh-cn are not supported; use only the first 2 chars
for language in (source_language, target_language):
if language != language[:2]:
logging.warning(
'Extended language code %s not supported. Falling back on %s.',
language, language[:2]
)
lang_id_to_string = {
source_language: babel.Locale(source_language[:2]).english_name,
target_language: babel.Locale(target_language[:2]).english_name,
}
src_str = 'translate {}'.format(lang_id_to_string[source_language])
tgt_str = ' to {}: '.format(lang_id_to_string[target_language])
return {
'inputs': tf.strings.join([src_str, tgt_str, x[source_language]]),
'targets': x[target_language],
}
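# Illustrative sketch (not part of the original module): since `translate` is
# wrapped in seqio.map_over_dataset, it is applied to a whole tf.data.Dataset,
# e.g. with a toy in-memory dataset assumed purely for illustration:
#
#   ds = tf.data.Dataset.from_tensors(
#       {'de': 'Das ist gut.', 'en': 'That is good.'})
#   ds = translate(ds, source_language='de', target_language='en')
#   # -> {'inputs': 'translate German to English: Das ist gut.',
#   #     'targets': 'That is good.'}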
@seqio.map_over_dataset
def summarize(x, article_key, summary_key):
"""Convert a summarization dataset to a text2text pair.
For example, say the dataset returns examples of this format:
{'article': <article>, 'highlights': <summary>}
If article_key = 'article', summary_key = 'highlights', then the outputs will
have the format:
{'inputs': 'summarize': <article>, 'targets': <summary>}
Args:
x: an example to process.
article_key: the feature key for the article to summarize.
summary_key: the feature key for the target summary.
Returns:
A preprocessed example with the format listed above.
"""
strs_to_join = ['summarize:', x[article_key]]
return {
'inputs': tf.strings.join(strs_to_join, separator=' '),
'targets': x[summary_key],
}
# Unicode ranges for characters in non-spaced languages.
# https://en.wikipedia.org/wiki/Category:Writing_systems_without_word_boundaries
# https://en.wikipedia.org/wiki/Han_unification#Unicode_ranges
# https://linguistics.stackexchange.com/questions/6131
NON_SPACED_LANGUAGE_RANGES = (
'\u1000-\u104f', # Burmese
'\u4e00-\u9fff', # CJK Unified Ideographs
'\u3400-\u4dbf', # CJK Unified Ideographs Extension A
'\uf900-\ufaff', # CJK Compatibility Ideographs
'\u2e80-\u2eff', # CJK Radicals Supplement
'\u31c0-\u31ef', # CJK Strokes
'\u3000-\u303f', # CJK Symbols and Punctuation
'\u3040-\u309f', # Japanese Hiragana
'\u30a0-\u30ff', # Japanese Katakana
'\ua980-\ua9df', # Javanese
'\u1780-\u17ff', # Khmer
'\u19e0-\u19ff', # Khmer Symbols
'\u0e80-\u0eff', # Lao
'\u1980-\u19df', # Tai Lue
'\u1a20-\u1aaf', # Tai Tham
'\u0e00-\u0e7f', # Thai
'\u0f00-\u0fff', # Tibetan
)
@seqio.map_over_dataset
def pad_nonspaced_languages(x, text_key='text'):
"""Pad non-spaced languages with spaces around each character.
Args:
x: an example to process.
text_key: a string, the key for the text feature to preprocess in the
dataset examples.
Returns:
A preprocessed example.
"""
res = dict(x)
text = res[text_key]
# Add spaces around any character from a non-spaced language.
pattern = ''.join(NON_SPACED_LANGUAGE_RANGES)
text = tf.strings.regex_replace(text, u'([{}])'.format(pattern), r' \1 ')
# Collapse consecutive whitespace into one space.
text = tf.strings.regex_replace(text, r'\s+', ' ')
res[text_key] = text
return res
def _pad_punctuation(text):
"""Adds spaces around punctuation."""
# Add space around punctuation.
text = tf.strings.regex_replace(text, r'(\W)', r' \1 ')
# Collapse consecutive whitespace into one space.
text = tf.strings.regex_replace(text, r'\s+', ' ')
return text
def _string_join(lst):
# Join on space, but collapse consecutive spaces.
out = tf.strings.join(lst, separator=' ')
return tf.strings.regex_replace(out, r'\s+', ' ')
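# Illustrative sketch (not part of the original module) of what these helpers
# produce, assuming eager execution on scalar string tensors:
#
#   _pad_punctuation(tf.constant('Hello, world!'))  # -> 'Hello , world ! '
#   _string_join(['question:', 'Who?', 'context:', 'He  left.'])
#   # -> 'question: Who? context: He left.'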
def trivia_qa(dataset):
"""Convert a TriviaQA example to multiple flattened examples.
TriviaQA produces examples with this form:
{'entity_pages': {dict of wiki entities},
'search_results': <dict of web search results>,
'answer': {dict of all answers}, 'question': <question>,
'question_id': <question_id>, 'question_source': <question_source>}
This function will return flattened examples of the format:
{'inputs': 'question: <question> context: <article>'
'targets': 'answer: <sampled answer>'}
Args:
dataset: a tf.data.Dataset to process.
Returns:
A preprocessed tf.data.Dataset with the format listed above.
"""
def triviaqa_question_answer_context(x):
"""Extracts matched contexts and answers.
Returns all matched (question-context, answer) pairs.
Args:
x: A tfds sample.
Returns:
Flattened samples: (question-context, answer).
"""
contexts = []
if 'entity_pages' in x:
contexts.append(x['entity_pages']['wiki_context'])
if 'search_results' in x:
contexts.append(x['search_results']['search_context'])
contexts = tf.concat(contexts, 0)
q = _pad_punctuation(x['question'])
answers = x['answer']['normalized_aliases']
combination_size = tf.size(answers)*tf.size(contexts)
find_answers = tf.TensorArray(
tf.bool, size=combination_size, dynamic_size=True)
selected_answers = tf.TensorArray(
tf.string, size=combination_size, dynamic_size=True)
join_q_c = tf.TensorArray(
tf.string, size=combination_size, dynamic_size=True)
def cond_fn(i, find_answers, selected_answers, join_q_c):
del find_answers, selected_answers, join_q_c # Unused
return tf.less(i, combination_size)
def body_fn(i, find_answers, selected_answers, join_q_c):
"""Find answers from contexts and join."""
context_idx = tf.math.floordiv(i, tf.size(answers))
answer_idx = tf.math.mod(i, tf.size(answers))
a = _pad_punctuation(answers[answer_idx])
a_ = tf.strings.join(['.*', a, '.*'])
c = _pad_punctuation(contexts[context_idx])
find_a = tf.strings.regex_full_match(
tf.strings.lower(c),
tf.strings.lower(a_))
find_answers = find_answers.write(i, find_a)
selected_answers = selected_answers.write(i, a)
join_q_c_str = _string_join(['question:', q, 'context:', c])
join_q_c = join_q_c.write(i, join_q_c_str)
return (i + 1, find_answers, selected_answers, join_q_c)
_, find_answers, selected_answers, join_q_c = tf.while_loop(
cond_fn,
body_fn,
loop_vars=[
tf.constant(0), find_answers, selected_answers,
join_q_c
])
find_answers = find_answers.stack()
selected_answers = selected_answers.stack()
join_q_c = join_q_c.stack()
selected_answers = tf.boolean_mask(selected_answers, find_answers)
selected_join_q_c = tf.boolean_mask(join_q_c, find_answers)
return selected_join_q_c, selected_answers
def my_fn(x):
"""Create TriviaQA example."""
join_q_c, a = triviaqa_question_answer_context(x)
return {
'inputs': join_q_c,
'targets': a
}
dataset = dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
return dataset.unbatch()
@seqio.map_over_dataset
def squad(x, include_context=True):
"""Convert SQuAD examples to a text2text pair.
SQuAD produces examples with this form:
{'id': <id>, context': <article>, 'question': <question>,
'answers': { 'text': [<n answers>] }}
This function will return examples of the format:
{'inputs': 'question: <question> context: <article>',
'targets': '<answer_0>',
'id': <id>, 'question': <question>, 'context': <context>,
'answers': [<n answers>]},
Args:
x: an example to process.
include_context: a boolean
Returns:
A preprocessed example with the format listed above.
"""
a = _pad_punctuation(x['answers']['text'])
q = _pad_punctuation(x['question'])
c = _pad_punctuation(x['context'])
if include_context:
inputs = _string_join(['question:', q, 'context:', c])
else:
inputs = _string_join(['squad trivia question:', q])
return {
'inputs': inputs,
'targets': a[0],
'id': x['id'],
'context': c,
'question': q,
'answers': a
}
def _span_answer(context, answer_text):
"""Finds start/end indices of answer_text in context after space tokenization.
If answer_tokens is not a sublist of context_tokens, returns empty string.
Args:
context: 0-d string tensor
answer_text: 0-d string
Returns:
A string tensor.
"""
def space_tok(s):
"""Replace non-word chars with space then split on space."""
s = tf.strings.regex_replace(s, r'\W', ' ')
return tf.strings.split(input=[s], sep=' ').values
def find_subseq(n, h):
"""Finds index of needle subsequence inside haystack.
Args:
n: 1-d tensor
h: 1-d tensor same type as n
Returns:
Index of start of n if found; otherwise -1.
"""
l_n = tf.size(n)
l_h = tf.size(h)
found = -1
for i in tf.range(0, l_h - l_n):
if tf.reduce_all(tf.equal(h[i:i+l_n], n)):
found = i
break
return found
answer_tokens = space_tok(answer_text)
context_tokens = space_tok(context)
start = find_subseq(answer_tokens, context_tokens)
end = start + tf.size(answer_tokens) - 1
# Just take the first candidate that matches exactly.
if tf.equal(start, -1):
return ''
return tf.strings.format('start: {} end: {}', [start, end])
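# Illustrative sketch (not part of the original module): with
# context='The fat cat sat on the mat today' and answer_text='sat on', both are
# space-tokenized, the answer sub-sequence is found at index 3 and spans two
# words, so the returned value is 'start: 3 end: 4'; if the tokenized answer
# never appears in the tokenized context, the empty string is returned.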
def squad_span_space_tokenized(dataset):
"""Convert SQuAD examples to a text2text pair with span output.
SQuAD produces examples with this form:
{'context': <article>, 'question': <question>,
'answers': { 'text': [<all answers>] }}
This function returns examples with the format
{'inputs': 'context: <article> question: <question>',
'targets': 'start: <start_index> end: <end_index>'}
where <start_index> and <end_index> specify the space-tokenized span
start/end indices. Both <start_index> and <end_index> are included in
the answer. In the case where the tokenized answer is
not found in the tokenized context, the example is skipped.
Args:
dataset: a tf.data.Dataset to process.
Returns:
A preprocessed tf.data.Dataset with the format listed above.
"""
def my_fn(x):
"""Create squad example as in squad_span_char, but tokenized on spaces."""
res = dict(x)
res['targets'] = _span_answer(x['context'], x['targets'])
return res
dataset = squad(dataset)
dataset = dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
return dataset.filter(lambda x: tf.strings.length(x['targets']) > 0)
def random_split_text(dataset,
text_key='text',
min_words_per_segment=16,
max_words_per_segment=512,
max_words_total=8192):
"""Randomly split single-string examples into multiple examples each.
Segment lengths are chosen according to a log-uniform distribution.
Each incoming string is chopped into multiple equal-length examples
with the last one possibly being shorter.
If the input string is longer than max_words_total, then we use one random
chunk and discard the rest. This may help with model stability.
The intended use case is to break up long text examples for use in
unsupervised transfer-learning.
We don't really want to use this preprocessor for any dataset which has a
well-defined evaluation procedure. If we apply this preprocessor e.g. in an MT
component, then the evaluation job will randomly split text when evaluating
and the BLEU will get funky.
Args:
dataset: a tf.data.Dataset with dictionaries containing the key text_key
text_key: a string
min_words_per_segment: an integer
max_words_per_segment: an integer
max_words_total: an integer
Returns:
a dataset
"""
def random_chunk(x, chunk_size, seed):
"""Pick a random chunk of a 1d Tensor.
The tensor is divided into chunks of length chunk_size, with the last
chunk being potentially smaller. A random chunk is returned.
Args:
x: a 1d tf.Tensor.
chunk_size: an integer.
seed: int32 [2]-Tensor, the random seed.
Returns:
a 1d tf.Tensor with length <= chunk_size.
"""
size = tf.size(x)
num_chunks = tf.maximum(1, (size - 1) // chunk_size + 1)
chunk_num = tf.random.stateless_uniform(
[],
seed=seed,
minval=0,
maxval=num_chunks,
dtype=tf.int32)
return x[chunk_size * chunk_num:chunk_size * (chunk_num + 1)]
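# Illustrative sketch (not part of the original function): for a 10-word input
# and chunk_size=4 there are (10 - 1) // 4 + 1 = 3 chunks covering word indices
# [0:4], [4:8] and [8:10]; one chunk index is drawn with a stateless uniform
# sample (so the same seed always selects the same chunk) and only that slice
# of the words is kept.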
@seqio.map_over_dataset(num_seeds=2)
def my_fn(x, seeds):
"""Split one string into multiple strings.
Args:
x: a feature dictionary
seeds: an int32 Tensor, shaped (2, 2), the random seeds.
Returns:
a feature dictionary
"""
text = x[text_key]
words = tf.strings.split([text]).values
if max_words_total:
words = random_chunk(words, max_words_total, seed=seeds[0])
n_words = tf.size(words)
# first pick a length (number of words per segment)
length = tf.cast(
tf.exp(
tf.random.stateless_uniform(
[],
minval=math.log(min_words_per_segment),
maxval=math.log(max_words_per_segment),
seed=seeds[1],
)
),
tf.int32)
# Pad to a multiple of length, then use tf.reshape to split up the words
# into num_segments segments each of the given length.
num_segments = tf.cast(
tf.math.ceil(
tf.cast(n_words, tf.float32) / tf.cast(length, tf.float32)
),
tf.int32)
padding = num_segments * length - n_words
words = tf.pad(words, [[0, padding]])
words = tf.reshape(words, [-1, length])
# Finally, join with spaces and strip. The padding turns into a bunch of
# spaces that get stripped out.
words = tf.strings.reduce_join(words, axis=1, separator=' ')
return {text_key: tf.strings.strip(words)}
return my_fn(dataset).unbatch()
def split_text_to_words(dataset, text_key='text', min_num_words=2):
"""Split text to words and filter out examples with too few words."""
def split(x):
res = dict(x)
res['words'] = tf.strings.split([x[text_key]]).values
return res
dataset = dataset.map(split, num_parallel_calls=AUTOTUNE)
return dataset.filter(lambda x: tf.size(x['words']) >= min_num_words)
def fill_in_the_blank(dataset,
text_key='text',
label='fill: '):
"""Create a dataset consisting of fill-in-the-blank text examples.
The input examples should have a key text_key associated with a tf.string
value.
The output examples have keys 'inputs' and 'targets'.
The input string is split on whitespace to form a sequence of words.
This sequence is chopped randomly into segments of one or more words.
Alternate segments are included in the inputs and targets, with a special
word 'X' marking a missing segment.
The given label is prepended to the inputs. Each input string produces two
examples - one the inverse of the other. Inputs with less than two words
are dropped.
EXAMPLE:
input:
{
'text': 'The fat cat sat on the mat.'
}
outputs:
{
'inputs': 'fill: The fat X the X'
'targets': 'X cat sat on X mat.'
}
{
'inputs': 'fill: X cat sat on X mat.'
'targets': 'The fat X the X'
}
Args:
dataset: a tf.data.Dataset
text_key: a string, the key for the text feature to preprocess in the
dataset examples.
label: a string, the label to prepend to the inputs.
Returns:
a tf.data.Dataset
"""
@seqio.map_over_dataset(num_seeds=3)
def my_fn(x, seeds):
"""Generates two preprocessed examples that are roughly inverses.
Args:
x: an example dict with text pre-split in `words` feature.
seeds: an int32 Tensor, shaped (3, 2), the random seeds.
Returns:
an example dict with two inputs and two targets, one for each resulting
preprocessed example.
"""
words = x['words']
n_words = tf.size(words)
# First select the break probability. We pick this on a log-uniform
# distribution between 1/(n_words + 1) and 1/2. This means that some
# sequences will be chopped roughly and others finely.
min_log_p_break = -tf.math.log(tf.cast(n_words, tf.float32) + 2.0)
max_log_p_break = -tf.math.log(2.0)
p_break = tf.exp(
tf.random.stateless_uniform(
[],
minval=min_log_p_break,
maxval=max_log_p_break,
seed=seeds[0])
)
# craffel@ says that there may be bugs in random.uniform making it not
# really uniform. This doesn't seem horribly important here, but may
# need another look.
breaks = tf.less(
tf.random.stateless_uniform([n_words - 1], seed=seeds[1]),
p_break)
def one_random_break():
pos = tf.random.stateless_uniform(
[],
minval=0,
maxval=n_words - 1,
dtype=tf.int32,
seed=seeds[2])
return tf.one_hot(pos, n_words - 1,
dtype=tf.bool, on_value=True, off_value=False)
breaks = tf.cond(
tf.math.reduce_any(breaks), lambda: breaks, one_random_break)
breaks = tf.concat([[True], breaks], axis=0)
word_to_seq_id = tf.math.mod(tf.math.cumsum(tf.cast(breaks, tf.int32)), 2)
# separators:
# if in your segment: ' '
# if break to other segment: ' X'
# else: ''
results = []
for seq_id in [0, 1]:
in_my_seq = tf.equal(word_to_seq_id, seq_id)
separator_strings = tf.where(
in_my_seq,
' ',
tf.where(breaks, ' X', '')
)
word_strings = tf.where(in_my_seq, words, '')
all_strings = tf.stack([separator_strings, word_strings], axis=1)
results.append(tf.strings.substr(
tf.strings.reduce_join(all_strings), 1, tf.int32.max))
inputs = tf.stack([tf.strings.join([label, results[0]]),
tf.strings.join([label, results[1]])])
targets = tf.stack([results[1], results[0]])
return {'inputs': inputs, 'targets': targets}
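# Illustrative sketch (not part of the original function): for
# words=['The', 'fat', 'cat', 'sat'] and breaks=[True, False, True, False]
# (after the leading True is prepended), the cumulative sum is [1, 1, 2, 2] and
# word_to_seq_id=[1, 1, 0, 0], so 'The'/'fat' land in one segment and
# 'cat'/'sat' in the other, rendered as 'The fat X' and 'X cat sat'.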
dataset = split_text_to_words(dataset, text_key, min_num_words=2)
return my_fn(dataset).unbatch()
def fill_in_the_blank_sized(
dataset,
size_bins=(1, 2, 4, 8, 16, 32, 64, 128, 256, 512),
text_key='text',
label='fill: '):
"""Fill in the blank preprocessor that labels blank with a binned size.
The actual blank size is sampled uniformly from the inclusive range of the min
and max bin. The blank is then filled in with the closest bin size to the
actual blank size.
Args:
dataset: a tf.data.Dataset, the dataset to preprocess.
size_bins: a list, a list of blank sizes to select from when labelling the
blank.
text_key: a string, the key for the text feature to preprocess in the
dataset examples.
label: a string, the label to prepend to the inputs.
Returns:
a tf.data.Dataset
"""
bins = sorted(size_bins)
@seqio.map_over_dataset(num_seeds=2)
def my_fn(x, seeds):
"""Apply transformation."""
words = x['words']
n_words = tf.size(words)
blank_size = tf.random.stateless_uniform(
[],
minval=bins[0],
maxval=tf.math.minimum(n_words, bins[-1]),
dtype=tf.dtypes.int32,
seed=seeds[0])
bin_delta = tf.math.abs(bins - blank_size)
bin_ = tf.gather(bins, tf.argmin(bin_delta))
blank_start = tf.random.stateless_uniform(
[],
minval=0,
maxval=tf.math.maximum(0, n_words-blank_size) + 1,
dtype=tf.dtypes.int32,
seed=seeds[1])
pre_blank = tf.strings.reduce_join(words[0:blank_start], separator=' ')
post_blank = tf.strings.reduce_join(
words[blank_start+blank_size:], separator=' ')
blank = tf.strings.format('_{}_', bin_)
# We strip to handle cases where blank is at beginning or end.
input_ = tf.strings.strip(
tf.strings.join([pre_blank, blank, post_blank], ' '))
input_ = tf.strings.join([label, input_])
target = tf.strings.reduce_join(
words[blank_start:blank_start+blank_size], separator=' ')
return {
'inputs': tf.strings.strip(input_),
'targets': tf.strings.strip(target)}
dataset = split_text_to_words(dataset, text_key, min_num_words=2)
# Filter out examples with fewer words than the minimum.
dataset = dataset.filter(lambda x: tf.size(x['words']) >= bins[0])
return my_fn(dataset)
def neighboring_pairs(dataset, text_key='text', reuse_sentences=True):
"""Create a dataset consisting of neighboring sentence pairs.
The input examples should have a key text_key associated with a tf.string
value.
The output examples have keys 'first' and 'second'.
We only take sentence pairs from within the same line since lines seem to
represent paragraph-like structures in our text datasets. Empty lines and
1-sentence lines will thus be ignored.
The argument reuse_sentences determines whether a sentence can be used as both
the first and last element in the pair. For example, the input with sentences
A,B,C,D will return (A,B),(B,C),(C,D) if reuse_sentences is True and
(A,B),(C,D) if reuse_sentences is False.
Args:
dataset: a tf.data.Dataset
text_key: a string, the key for the text feature to preprocess in the
dataset examples.
reuse_sentences: a boolean
Returns:
a tf.data.Dataset
"""
def split_by_lines(dataset):
"""Splits text in dataset by line, removing empty lines."""
def my_fn(text):
lines = tf.strings.split([text], sep='\n').values
return tf.strings.strip(lines)
dataset = dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
dataset = dataset.unbatch()
return dataset.filter(lambda x: tf.strings.length(x) > 0)
def split_into_pairs(line):
"""Split a given text example into pairs of neighboring sentences."""
# TODO(mmatena): Use better sentence segmentation.
sep = str(uuid.uuid4())
sentences = tf.strings.regex_replace(line, r'((?:\.|\!|\?)+)', r'\1' + sep)
sentences = tf.strings.strip(tf.strings.split([sentences], sep).values)
if reuse_sentences:
firsts = sentences[:-1]
seconds = sentences[1:]
else:
firsts = sentences[:-1:2]
seconds = sentences[1::2]
return {
'first': firsts,
'second': seconds,
}
def example_len(x):
return tf.math.minimum(
tf.strings.length(x['first']), tf.strings.length(x['second']))
# Split by lines.
dataset = dataset.map(lambda x: x[text_key], num_parallel_calls=AUTOTUNE)
dataset = split_by_lines(dataset)
# Get pairs of neighboring sentences.
dataset = dataset.map(split_into_pairs, num_parallel_calls=AUTOTUNE)
dataset = dataset.unbatch()
# Remove examples with empty strings.
dataset = dataset.filter(lambda x: example_len(x) > 0)
return dataset
@seqio.map_over_dataset
def glue(x, benchmark_name, label_names, feature_names=None, id_key='idx'):
"""Convert a dataset from glue to text2text examples.
This function uses the feature names from the dataset to unpack examples into
a format amenable for a text2text problem. For example, consider the Quora
Question Pairs (QQP) benchmark, which would suggest
benchmark_name="qqp"
label_names=['not_duplicate', 'duplicate']
For QQP, a typical example might look like
{
"question1": "Why do I easily get bored of my friends?",
"question2": "Why do I get bored of friends so quickly?",
"label": 1,
"idx": 10,
}
This example would be transformed to
{
"inputs": (
"qqp question1: Why do I easily get bored of my friends? question2: "
"Why do I get bored of my friends so quickly?"
),
"targets": "duplicate",
"idx": 10,
}
Args:
x: an example to process.
benchmark_name: the name of the GLUE benchmark for this dataset.
label_names: a list of label names corresponding to class index.
feature_names: an optional ordered list of feature names. If provided,
features will be ordered in this way in the output. If not provided, all
features (except 'idx' and 'label') will be used, sorted by name.
id_key: str, key for id in the dataset. If not provided, 'idx' will be used.
if None, no id will be added to the dataset.
Returns:
A preprocessed example.
"""
# If an ordering is not provided, sort feature keys to ensure a consistent
# order.
feature_keys = (
feature_names or sorted(set(x.keys()).difference(['label', 'idx'])))
# Pack keys (formatted as " key: ") and corresponding text feature
strs_to_join = []
for key in feature_keys:
strs_to_join.append('{}:'.format(key))
strs_to_join.append(x[key])
# Add benchmark name at the start
strs_to_join.insert(0, benchmark_name)
label_name = tf.cond(
# When no label is provided (label == -1), use "<unk>"
tf.equal(x['label'], -1),
lambda: tf.constant('<unk>'),
# Otherwise grab the label text from label_names
lambda: tf.gather(label_names, x['label']),
)
joined = tf.strings.join(strs_to_join, separator=' ')
ex = {}
if benchmark_name == 'multirc':
# Remove HTML markup.
joined = tf.strings.regex_replace(joined, '<br>', ' ')
joined = tf.strings.regex_replace(joined, '<(/)?b>', '')
# Store the data index in the returned example (used by eval)
ex['idx/paragraph'] = x['idx']['paragraph']
ex['idx/question'] = x['idx']['question']
ex['idx/answer'] = x['idx']['answer']
else:
# Store the data index in the returned example (used by eval)
if id_key:
ex['idx'] = x[id_key]
ex['inputs'] = joined
ex['targets'] = label_name
return ex
@seqio.map_over_dataset
def stsb(x):
"""Convert STSB examples to text2text format.
STSB maps two sentences to a floating point number between 1 and 5
representing their semantic similarity. Since we are treating all tasks as
text-to-text tasks we need to convert this floating point number to a string.
The vast majority of the similarity score labels in STSB are in the set
[0, 0.2, 0.4, ..., 4.8, 5.0]. So, we first round the number to the closest
entry in this set, and then we convert the result to a string (literally e.g.
"3.4"). This converts STSB roughly into a 26-class classification dataset.
This function uses the feature names from the dataset to unpack examples into
a format amenable for a text2text problem.
For example, a typical example from STSB might look like
{
"sentence1": "Three more US soldiers killed in Afghanistan",
"sentence2": "NATO Soldier Killed in Afghanistan",
"label": 1.8,
}
This example would be transformed to
{
"inputs": (
"stsb sentence1: Three more US soldiers killed in Afghanistan "
"sentence2: NATO Soldier Killed in Afghanistan"
),
"targets": "1.8",
}
Args:
x: an example to process.
Returns:
A preprocessed example.
"""
strs_to_join = [
'stsb sentence1:', x['sentence1'], 'sentence2:', x['sentence2']
]
label_string = tf.as_string(tf.round(x['label'] * 5) / 5, precision=1)
joined = tf.strings.join(strs_to_join, separator=' ')
return {'inputs': joined, 'targets': label_string, 'idx': x['idx']}
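# Illustrative sketch (not part of the original module) of the rounding above:
# a similarity score of 1.73 becomes tf.round(1.73 * 5) / 5 = 9 / 5 = 1.8 and
# is emitted as the target string '1.8'; scores already on the 0.2 grid
# (e.g. 1.8) pass through unchanged.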
@seqio.map_over_dataset
def wsc(x):
"""Convert WSC examples to text2text format.
WSC includes a sentence along with 2 'spans': the first denoting a noun and
the other a pronoun. The 'label' specifies whether or not the pronoun is
referencing the noun. This preprocessor puts ' * ' around the noun and ' # '
around the pronoun.
For example, a typical example from WSC might look like
{
'text': 'This is a test sentence .',
'span1_text': 'test',
'span1_index': 3,
'span2_text': 'This',
'span2_index': 0,
'label': 0
}
This example would be transformed to
{
'inputs': 'wsc text: # This # is a * test * sentence .',
'targets': 'False'
}
Args:
x: an example to process.
Returns:
A preprocessed example.
"""
def _mark_span(text, span_str, span_idx, mark):
pattern_tmpl = r'^((?:\S+\s){N})(W)'
pattern = tf.strings.regex_replace(pattern_tmpl, 'N',
tf.as_string(span_idx))
pattern = tf.strings.regex_replace(pattern, 'W', span_str)
return tf.strings.regex_replace(text, pattern, r'\1{0} \2 {0}'.format(mark))
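# Illustrative sketch (not part of the original function): for
# text='This is a test sentence .', span_str='test', span_idx=3 and mark='*',
# the pattern becomes r'^((?:\S+\s){3})(test)' - skip the first three
# whitespace-delimited words, capture the span - and the replacement yields
# 'This is a * test * sentence .'.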
text = x['text']
text = _mark_span(text, x['span1_text'], x['span1_index'], '*')
# Compensate for 2 added "words" added in previous step.
span2_index = x['span2_index'] + 2 * tf.cast(
x['span1_index'] < x['span2_index'], tf.int32)
text = _mark_span(text, x['span2_text'], span2_index, '#')
# Add benchmark name at the start
strs_to_join = ['wsc', 'text:', text]
label_name = tf.cond(
# When no label is provided (label == -1), use "<unk>"
tf.equal(x['label'], -1),
lambda: tf.constant('<unk>'),
# Otherwise use False/True.
lambda: tf.gather(['False', 'True'], x['label']))
joined = tf.strings.join(strs_to_join, separator=' ')
return {'inputs': joined, 'targets': label_name, 'idx': x['idx']}
@gin.configurable
def record(dataset):
"""Convert ReCoRD examples to text2text examples.
ReCoRD contains a passage, query containing a '@placeholder' string, and a set
of entities that are the possible values of the placeholder. Each train and
validation example will have a list of answers, any of which would be
considered correct.
For example, a typical example from ReCoRD might look like
{
'passage': 'This is the passage.',
'query': 'A @placeholder is a bird.',
'entities': ['penguin', 'potato', 'pigeon'],
'answers': ['penguin', 'pigeon'],
}
which this preprocessor would turn into the following two examples:
{
'inputs': 'record query: A @placeholder is a bird. entities: penguin, '
'potato, pigeon passage: This is the passage.',
'targets': 'penguin',
}
and
{
'inputs': 'record query: A @placeholder is a bird. entities: penguin, '
'potato, pigeon passage: This is the passage.',
'targets': 'potato',
}
Args:
dataset: a tf.data.Dataset to process.
Returns:
a tf.data.Dataset
"""
def process_answers(x):
"""Helper fn to get one example per answer."""
ex = x.copy()
num_answers = tf.size(ex['answers'])
def duplicate_along_first_dim(t):
n_duplicates = tf.math.maximum(num_answers, 1)
return tf.broadcast_to(
t, shape=tf.concat([[n_duplicates], tf.shape(t)], axis=0))
for k, v in x.items():
if k != 'idx':
ex[k] = duplicate_along_first_dim(v)
ex['targets'] = tf.cond(
tf.greater(num_answers, 0), lambda: x['answers'],
lambda: tf.constant(['<unk>']))
ex['idx'] = {
'passage': duplicate_along_first_dim(x['idx']['passage']),
'query': duplicate_along_first_dim(x['idx']['query']),
}
return ex
def my_fn(x):
"""Converts the processed example to text2text strings."""
passage = x['passage']
passage = tf.strings.regex_replace(passage,
r'(\.|\?|\!|\"|\')\n@highlight\n',
r'\1 ')
passage = tf.strings.regex_replace(passage, r'\n@highlight\n', '. ')
strs_to_join = [
'record query:', x['query'], 'entities:',
tf.strings.reduce_join(x['entities'], separator=', '), 'passage:',
passage
]
joined = tf.strings.join(strs_to_join, separator=' ')
ex = {}
# Store the data index in the returned example (used by eval)
ex['idx/passage'] = x['idx']['passage']
ex['idx/query'] = x['idx']['query']
ex['inputs'] = joined
# Note that "answers" has been converted to a single string by the
# process_answers function.
ex['targets'] = x['targets']
# Pass-through full list of answers for eval
ex['answers'] = x['answers']
return ex
dataset = dataset.map(process_answers, num_parallel_calls=AUTOTUNE)
dataset = dataset.unbatch()
return dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
def multi_translate(dataset, source_language, target_language):
"""Convert a multi-translate dataset to a text2text pair.
For example, say the dataset returns examples which have a 'translations'
feature key so that examples have the following format:
{
...
'translations': {
'language': ['de', 'fr', 'en'],
'translation': ['Das ist gut.', 'Ca c'est bon', 'That is good.']
},
...
}
If source_language = 'de', target_language = 'en', then this function will
return examples of the format:
{'inputs': 'translate German to English: Das ist gut.',
'targets': 'That is good.'}
Any other languages present in the dataset will be filtered out.
Args:
dataset: a tf.data.Dataset to process.
source_language: source language code (e.g. 'en') to translate from.
target_language: target language code (e.g. 'de') to translate to.
Returns:
A preprocessed tf.data.Dataset with the format listed above.
"""
def filter_fn(x):
langs = x['translations']['language']
# Test whether both source/target_language appear in the language list
source_in_langs = tf.reduce_any(tf.equal(source_language, langs))
target_in_langs = tf.reduce_any(tf.equal(target_language, langs))
return tf.logical_and(source_in_langs, target_in_langs)
def map_fn(x):
langs = x['translations']['language']
# Retrieve the index in langs where source/target_language appears
src_idx = tf.squeeze(tf.where(tf.equal(langs, source_language)))
tgt_idx = tf.squeeze(tf.where(tf.equal(langs, target_language)))
return {
source_language: x['translations']['translation'][src_idx],
target_language: x['translations']['translation'][tgt_idx],
}
dataset = dataset.filter(filter_fn)
dataset = dataset.map(map_fn, num_parallel_calls=AUTOTUNE)
return translate(dataset, source_language, target_language)
@seqio.map_over_dataset
def definite_pronoun_resolution_simple(x, label='wsc:'):
"""Converts DPR examples to a simple text to text format.
A typical example from the definite pronoun resolution dataset might look like
{
'sentence': 'Bob asked Tom if he can lend some money.',
'pronoun': 'he',
'candidates': ['Bob', 'Tom'],
'label': 1,
}
This will be transformed to
{
'inputs': 'wsc: Bob asked Tom if *he* can lend some money.'
'targets': 'Tom',
}
Args:
x: an example to process.
label: a string, the label to prepend to the inputs.
Returns:
A preprocessed example.
"""
# If there are multiple instances of the pronoun in the sentence, the first
# one is the one that needs to be resolved.
inputs = [
label,
tf.strings.regex_replace(
x['sentence'],
tf.strings.join([r' (', x['pronoun'], r')( |\.|,)']),
r' *\1*\2',
replace_global=False,
),
]
return {
'inputs': tf.strings.join(inputs, separator=' '),
'targets': x['candidates'][x['label']],
}
def next_sentence_prediction(dataset,
text_key='text',
reuse_sentences=True,
label_sentences=False,
p_neighbors=0.5,
label='nsp: ',
buffer_size=50000):
"""Create a dataset containing a next sentence prediction objective.
The input examples should have a key text_key associated with a tf.string
value.
The output examples have keys 'inputs' and 'targets'.
EXAMPLE OUTPUTS:
{
input: "nsp: sentence1: The man went to the store. sentence2: Penguins are "
"flightless birds.",
target: "not_next"
}
The "sentence1:" and "sentence2:" labels will be omitted if label_sentences is
False.
Args:
dataset: a tf.data.Dataset
text_key: a string, the key for the text feature to preprocess in the
dataset examples.
reuse_sentences: a boolean, see docs for `neighboring_pairs` for more info.
label_sentences: a boolean
p_neighbors: a float between 0 and 1, the probability that a sentence pair
will be neighbors.
label: a string, the label to prepend to the inputs.
buffer_size: an int, the size of the shuffle buffer used to get
non-neighboring sentences.
Returns:
a tf.data.Dataset
"""
sentence1_label, sentence2_label = '', ''
if label_sentences:
sentence1_label, sentence2_label = 'sentence1: ', 'sentence2: '
empty = tf.constant('', dtype=tf.string, shape=[1])
dataset = neighboring_pairs(
dataset, text_key=text_key, reuse_sentences=reuse_sentences)
dataset = dataset.shuffle(buffer_size).batch(2, drop_remainder=True)
def some_are_empty(*tensors):
"""See if at least one tensor has shape [0]."""
empty = [tf.equal(tf.size(t), 0) for t in tensors]
return tf.reduce_any(empty)
@seqio.map_over_dataset(num_seeds=1)
def my_fn(x, seed):
"""Function to be applied to each example in dataset."""
use_neighbors = (
tf.random.stateless_uniform(shape=[], seed=seed) < p_neighbors
)
firsts, seconds = tf.cond(
use_neighbors,
lambda: (x['first'], x['second']),
lambda: (x['first'], tf.stack([x['second'][1], x['second'][0]])),
)
relation_label = tf.cond(
use_neighbors,
lambda: 'next',
lambda: 'not_next',
)
inputs = []
for i in range(2):
first_inputs = firsts[i]
second_inputs = seconds[i]
def create_examples(first_i=first_inputs, second_i=second_inputs):
return tf.strings.join([
label,
sentence1_label,
first_i,
' ',
sentence2_label,
second_i,
])
inpt = tf.cond(
some_are_empty(first_inputs, second_inputs),
lambda: empty,
create_examples,
)
inputs.append(tf.strings.strip(inpt))
inputs = tf.reshape(inputs, [-1])
targets = tf.reshape(2 * [relation_label], [-1])
return {'inputs': inputs, 'targets': targets}
dataset = my_fn(dataset).unbatch()
def example_len(x):
return tf.math.minimum(
tf.strings.length(x['inputs']), tf.strings.length(x['targets']))
# Remove examples with empty strings.
return dataset.filter(lambda x: example_len(x) > 0)
@seqio.map_over_dataset
def lm(x):
"""Basic language modeling objective for text - empty inputs.
Given inputs with the format:
{"text": "Here is some text."}
This preprocess produces examples with the format
{"inputs": "", "targets": "Here is some text."}
Args:
x: an example to process.
Returns:
A preprocessed example.
"""
return {'inputs': '', 'targets': x['text']}
def _wsc_inputs(x):
"""Given an example from SuperGLUE WSC, compute the 'inputs' value.
The output will look like a fill in the blank with the pronoun blanked out.
For example, the text
'Mitchell asked Tom if he could lend some money.'
would be transformed to
'Mitchell asked Tom if X could lend some money.'
Args:
x: A dict that is an example from the WSC task of SuperGLUE.
Returns:
A scalar string tensor.
"""
words = tf.strings.split([x['text']], sep=' ').values
# We would need some special logic to handle the case where the pronoun is the
# first or last word in the text. None of the examples in WSC seem to have
# this, so we are ignoring these cases.
with tf.control_dependencies([
tf.assert_greater(x['span2_index'], 0),
tf.assert_less(x['span2_index'], tf.size(words)),
]):
pronoun_index = tf.identity(x['span2_index'])
def create_input():
with tf.control_dependencies(
[tf.assert_equal(words[pronoun_index], x['span2_text'])]):
return tf.strings.join(
[
tf.strings.reduce_join(words[:pronoun_index], separator=' '),
'X',
tf.strings.reduce_join(
words[pronoun_index + 1:], separator=' '),
],
separator=' ',
)
# Handle some special cases.
if tf.equal(
x['text'],
'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. \"Good for him,\" he said. '
):
return (
'The boy continued to whip the pony , and eventually the pony threw '
'him over. John laughed out quite loud. "Good for X ," he said.'
)
# Using the span2_index, we get 'use' instead of 'it'.
if tf.equal(
x['text'],
'When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?'
):
return (
'When they had eventually calmed down a bit , and had gotten home, '
'Mr. Farley put the magic pebble in an iron safe . Some day they might '
'want to use X , but really for now, what more could they wish for?'
)
return create_input()
def wsc_simple(dataset,
label='wsc:',
correct_referent_only=False):
"""Converts SuperGLUE WSC examples to a simple text to text format.
A typical example from SuperGLUE WSC might look like
{
'text': 'Mitchell asked Tom if he could lend some money.',
'span1_text': 'Tom',
'span2_text': 'he',
'span2_index': 4,
}
This will be transformed to
{
'inputs': 'wsc: Mitchell asked Tom if *he* could lend some money.'
'targets': 'Tom',
}
The targets will always be the text of the referent regardless of whether it
is the correct referent of the pronoun. Thus for training purposes, please
set `correct_referent_only` to be True.
Args:
dataset: a tf.data.Dataset
label: a string, the label to prepend to the inputs.
correct_referent_only: a bool, whether to filter out examples for which the
targets is not the correct referent of the pronoun.
Returns:
a tf.data.Dataset
"""
def map_fn(x):
"""Function to be called for every example in dataset."""
inputs = [
label,
tf.strings.regex_replace(
_wsc_inputs(x), r' X ', ' *' + x['span2_text'] + '* '),
]
referent = x['span1_text']
return {
'inputs': tf.strings.join(inputs, separator=' '),
# The reshape is necessary as otherwise the tensor has unknown rank.
'targets': tf.reshape(referent, shape=[]),
'label': x.get('label', 0),
'idx': x['idx'],
}
if correct_referent_only:
dataset = dataset.filter(lambda x: tf.cast(x.get('label', False), tf.bool))
return dataset.map(map_fn, num_parallel_calls=AUTOTUNE)
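# Illustrative usage sketch (assumes `wsc_ds` is a SuperGLUE WSC
# tf.data.Dataset with the fields shown in the docstring above):
#
#   train_ds = wsc_simple(wsc_ds, correct_referent_only=True)   # training
#   eval_ds = wsc_simple(wsc_ds, correct_referent_only=False)   # evaluation
#
# Each output example has 'inputs' like
# 'wsc: Mitchell asked Tom if *he* could lend some money.' and 'targets'
# containing the referent text, e.g. 'Tom'.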
@seqio.map_over_dataset
def wnli_simple(x, label='wsc:'):
"""Converts GLUE WNLI examples to a simple text to text format.
A typical example from WNLI might look like:
{
'sentence1': 'The fish ate the worm. It was tasty.',
'sentence2': 'The worm was tasty.',
'label': 1,
}
This will be transformed to:
{
'inputs': 'wsc: The fish ate the worm. *It* was tasty.',
'targets': 'The worm',
'premise': 'The fish ate the worm. It was tasty.',
'hypothesis': 'The worm was tasty.',
'label': 1,
}
This preprocessor has been manually verified to produce reasonable WSC
examples for the dev and test sets. Tasks using this preprocessor should only
be used for eval and not for training.
Args:
x: an example to process.
label: a string, the label to prepend to the inputs.
Returns:
A preprocessed example.
"""
pronouns = ['he', 'she', 'they', 'it', 'her', 'his', 'their', 'them', 'him']
PronounMatch = collections.namedtuple( # pylint: disable=invalid-name
'PronounMatch', ['score', 'index_in_premise', 'candidate'])
def split_clean(s):
"""Returns array of words with punctuation and capitalization removed."""
words = [
re.sub(r'(\.|,|\?|\!)$', '', w) for w in s.strip().lower().split(' ')
]
return [w for w in words if w]
def get_all_pronoun_indices(s):
return [i for i, w in enumerate(s) if w in pronouns]
def get_post_match_size(hypothesis, words):
"""Returns len of largest prefix of words that is substr of hypothesis."""
hypothesis = ' '.join(hypothesis)
for i in range(len(words)):
if ' '.join(words[:i + 1]) not in hypothesis:
return i
return len(words)
def get_pre_match_size(hypothesis, words):
"""Returns len of largest suffix of words that is substr of hypothesis."""
return get_post_match_size(hypothesis[::-1], words[::-1])
def get_pronoun_match(premise, hypothesis, index):
"""Return the PronounMatch for the pronoun at `index` in premise."""
pre, post = premise[:index], premise[index + 1:]
pre_match_size = get_pre_match_size(hypothesis, pre)
post_match_size = get_post_match_size(hypothesis, post)
score = pre_match_size + post_match_size
candidate = ''
if score:
pre_match = pre[-pre_match_size or len(pre):]
post_match = post[:post_match_size]
m = re.search(' '.join(pre_match + [r'(.+)'] + post_match),
' '.join(hypothesis))
if not m:
# Handle cases where the candidate is at the start of the hypothesis.
m = re.search(' '.join([r'^(.+)'] + post_match), ' '.join(hypothesis))
if not m:
# Handle cases where the candidate is at the end of the hypothesis.
m = re.search(' '.join(pre_match + [r'(.+)$']), ' '.join(hypothesis))
if m:
candidate = m.group(1)
return PronounMatch(
score=score, index_in_premise=index, candidate=candidate)
def get_best_pronoun_match(premise, hypothesis):
"""Returns the match for the pronoun in the premise to disambiguate."""
pronoun_indices = get_all_pronoun_indices(premise)
scoredpronouns = [
get_pronoun_match(premise, hypothesis, index)
for index in pronoun_indices
]
return max(scoredpronouns, key=lambda x: x.score)
def highlight(sentence, index):
words = sentence.split(' ')
word = words[index]
if word[-1] in ['.', ',', '!', '?']:
highlighted = '*{}* {}'.format(word[:-1], word[-1])
else:
highlighted = '*{}*'.format(word)
return ' '.join(words[:index] + [highlighted] + words[index + 1:])
def make_nonpossessive(word):
# WSC simple targets will never be possessive, even when the pronoun is
# possessive.
if word.endswith("'"):
return word[:-1]
elif word.endswith("'s"):
return word[:-2]
else:
return word
def clean_up(candidate):
words = candidate.split(' ')
# Sometimes the candidate extraction messes up, and the candidate will start
# with the start of the hypothesis and extend to the correct candidate. We
# can try to clean up the candidate in some cases by removing everything up
# to the last article in the sentence.
article_index = max(
[words.index(art) for art in {'a', 'an', 'the'} if art in words] or [0])
return ' '.join(words[article_index:])
def process_candidate(candidate, hypothesis):
"""Handles special cases and adds proper punctuation/capitalization."""
candidate = clean_up(candidate)
pattern = '({})'.format(' '.join([
r'{}(?:\.|,|\?|\!)?'.format(re.escape(c)) for c in candidate.split(' ')
]))
m = re.search(pattern, hypothesis, re.IGNORECASE)
if not m:
raise ValueError(
'Unable to find candidate "{}" in hypothesis "{}".'.format(
candidate, hypothesis))
candidate = m.group(1)
if candidate and candidate[-1] in ['.', ',', '!', '?']:
candidate = candidate[:-1]
return make_nonpossessive(candidate)
def compute_inputs_and_targets(premise, hypothesis):
"""Compute inputs and targets for WNLI simple."""
premise = tf.compat.as_text(premise.numpy())
hypothesis = tf.compat.as_text(hypothesis.numpy())
match = get_best_pronoun_match(
split_clean(premise), split_clean(hypothesis))
targets = process_candidate(match.candidate, hypothesis)
inputs = '{} {}'.format(label, highlight(premise, match.index_in_premise))
return inputs, targets
inputs, targets = tf.py_function(
compute_inputs_and_targets,
inp=[x['sentence1'], x['sentence2']],
Tout=[tf.string, tf.string])
return {
# The reshape is necessary as otherwise the tensor has unknown rank.
'inputs': tf.reshape(inputs, shape=[]),
'targets': tf.reshape(targets, shape=[]),
'premise': x['sentence1'],
'hypothesis': x['sentence2'],
'label': x.get('label', 0),
'idx': x['idx'],
}
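# Illustrative usage sketch (assumes `wnli_ds` is a GLUE WNLI tf.data.Dataset
# with 'sentence1', 'sentence2', 'label', and 'idx' features). As noted in the
# docstring, the converted examples are intended for evaluation only:
#
#   eval_ds = wnli_simple(wnli_ds, label='wsc:')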
def rank_classification(
ds: tf.data.Dataset,
inputs_fn: Callable[[FeatureType], tf.Tensor],
targets_fn: Callable[[FeatureType], tf.Tensor],
is_correct_fn: Callable[[FeatureType], tf.Tensor],
weight_fn: Optional[Callable[[FeatureType], tf.Tensor]] = None,
mode: str = 'eval',
passthrough_feature_keys: Optional[Sequence[str]] = None,
) -> tf.data.Dataset:
"""Prepare dataset for rank classification scoring.
Intended to be used with `rank_classification` postprocessor and metric.
`inputs_fn` and `targets_fn` must return the 'inputs' and 'targets' features,
respectively, for each possible class label given the raw example features.
'is_correct_fn' must return the 'is_correct' feature, a boolean for whether
each label is correct.
In 'train' mode, only the inputs / targets marked correct will be produced.
In 'eval' mode, all inputs / targets will be produced.
In 'fewshot_eval', all inputs / targets will be produced as a single batch.
Each output example will also be given a unique 'idx' feature. The first dim
is a sequential index for the input example and the second is the index of the
generated output for it. E.g., the second output example from the fourth input
example would be `[3, 1]`.
To be clear, consider the following arguments:
inputs_fn=lambda ex: ex['prefix'],
targets_fn=lambda ex: ex['suffix'],
is_correct_fn=lambda ex: tf.one_hot(ex['label'], num_classes)
weight_fn=lambda ex: ex['weight']
Given the following example:
{
'prefix': ['The farmland needed ', 'The farmland wanted '],
'suffix': ['water', 'cows'],
'label': 0,
'weight': 1.0,
}
the preprocessor would return:
[{
'idx': [0, 0],
'inputs': 'The farmland needed ',
'targets': 'water',
'is_correct': True,
'weight': 1.0
},
{
'idx': [0, 1],
'inputs': 'The farmland wanted ',
'targets': 'cows',
'is_correct': False,
'weight': 1.0
}]
With mode set to 'train', it would return only the first example,
since it uses the correct label. With mode set to 'fewshot_eval', it would
return both examples in a single batch.
Args:
ds: a tf.data.Dataset to preprocess.
inputs_fn: a callable that returns the 'inputs' features for each label
given the input example.
targets_fn: a callable that returns the 'targets' features for each label
given the input example.
is_correct_fn: a callable that returns the 'is_correct' feature, indicating
which label(s) are correct. May be a boolean or int32 scalar or 1-D Tensor.
weight_fn: a callable that returns the 'weight' feature (float32 scalar).
mode: A string, one of 'train', 'eval', or 'fewshot_eval'. 'train' produces
only the correct example(s) based on the label value(s). 'eval' produces an
example for every possible class value, sequentially. 'fewshot_eval' produces
an example for every possible class value, batched together for each input
example.
passthrough_feature_keys: a sequence of feature names that should be passed
through to the output of this preprocessor, e.g. ["starburst", "tokens"].
Returns:
A tf.data.Dataset containing 'idx', 'inputs', 'targets', and 'is_correct'.
"""
if mode not in ('train', 'eval', 'fewshot_eval'):
raise ValueError(
"Mode must be one of 'train', 'eval', or 'fewshot_eval'. "
f"Got '{mode}'.")
def make_examples(idx, ex):
inputs = inputs_fn(ex)
targets = targets_fn(ex)
is_correct = tf.cast(is_correct_fn(ex), tf.bool)
tf.debugging.assert_equal(
tf.size(is_correct), [tf.size(inputs), tf.size(targets)],
'`inputs_fn`, `targets_fn`, and `is_correct_fn` must return the same '
'size tensors.')
num_out = tf.size(is_correct)
in_idx = tf.fill([num_out], tf.cast(idx, tf.int32))
out_idx = tf.range(num_out)
output = {
'idx': tf.stack([in_idx, out_idx], 1),
'inputs': inputs,
'targets': targets,
'is_correct': is_correct,
}
if passthrough_feature_keys is not None:
for feature_name in passthrough_feature_keys:
output[feature_name] = [ex[feature_name]] * len(targets)
if weight_fn is not None:
output['weight'] = tf.fill(tf.shape(is_correct), weight_fn(ex))
output['weight'] = tf.cast(output['weight'], tf.float32)
return output
ds = ds.enumerate()
ds = ds.map(make_examples, num_parallel_calls=AUTOTUNE)
if mode != 'fewshot_eval':
ds = ds.unbatch()
if mode == 'train':
ds = ds.filter(lambda ex: ex['is_correct'])
return ds
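# Illustrative usage sketch mirroring the docstring example above (the raw
# feature names 'prefix', 'suffix', 'label', and 'weight' are assumptions
# about the incoming dataset):
#
#   ds = rank_classification(
#       ds,
#       inputs_fn=lambda ex: ex['prefix'],
#       targets_fn=lambda ex: ex['suffix'],
#       is_correct_fn=lambda ex: tf.one_hot(ex['label'], num_classes),
#       weight_fn=lambda ex: ex['weight'],
#       mode='eval')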
def rank_classification_formatter(
ds: tf.data.Dataset,
inputs_formats: Union[str, Sequence[str]],
targets_formats: Union[str, Sequence[str]],
mode: str = 'eval',
label_key: str = 'label',
weight_key: Optional[str] = None) -> tf.data.Dataset:
"""Create 'inputs' and 'targets' strings for ranking classification.
Intended to be used with `rank_classification` postprocessor and metric.
Inputs will be formatted by filling in the feature values in the
`inputs_formats` and `targets_formats` strings.
Nested features can be accessed by concatenating the features using forward
slash. For eg: if sub-sub-key is nested under sub-key, which is nested under
key, then sub-sub-key can be accessed using key/sub-key/sub-sub-key.
In 'eval' mode, a separate example will be produced for each targets / inputs
format string. These can then be scored to find the one with the highest
likelihood. The `rank_classification` postprocessor and metric allow you to
evaluate with this technique.
In 'train' mode, only the targets / inputs format string indexed by the
label(s) will be produced. In 'eval' mode, all inputs / targets will be
produced.
Each input example will also be given a unique, sequential index called 'idx'.
For example, with arguments:
```
inputs_format='{premise} What is the {question}? X',
targets_formats=[
'I think {choice1}.',
'I think {choice2}.'
],
mode='eval'
```
given the input:
{
'premise': 'The farmland needed irrigation.',
'question': 'effect',
'choice1' : 'a canal was constructed',
'choice2': 'the crops grew tall',
'label': 0,
}
the preprocessor would return:
[{
'idx': 0,
'inputs': 'The farmland needed irrigation. What is the effect? X',
'targets': 'I think a canal was constructed.',
'is_correct': True
},
{
'idx': 0,
'inputs': 'The farmland needed irrigation. What is the effect? X',
'targets': 'I think the crops grew tall.',
'is_correct': False
}]
With `mode='train'`, it would return only the first example,
since it uses the correct label.
With `mode='fewshot_eval'`, it would return both examples in a single batch.
Args:
ds: a tf.data.Dataset to preprocess.
inputs_formats: A string or a list of strings to format with feature values
to produce 'inputs'. Feature keys should be surrounded by curly braces to
be replaced.
targets_formats: A string or a list of strings to format with feature values
to produce 'targets', one for each possible class value. Feature keys
should be surrounded by curly braces to be replaced.
mode: A string, one of 'train', 'eval', or 'fewshot_eval'. 'train' produces
only the correct example(s) based on the label value(s). 'eval' produces
an example for every possible class value, sequentially. 'fewshot_eval'
produces an example for every possible class value, batched together for
each input example.
label_key: A string, the feature key for the integer label value(s).
weight_key: A string, the feature key for the float example weight.
Returns:
A tf.data.Dataset containing 'idx', 'inputs', 'targets', and 'is_correct'.
"""
if (isinstance(inputs_formats, (list, tuple)) and
isinstance(targets_formats, (list, tuple))):
if len(inputs_formats) != len(targets_formats):
raise ValueError(
f'The inputs_formats ({len(inputs_formats)}) and '
f'targets_formats ({len(targets_formats)}) are both instances '
'of list or tuple, but do not have matching lengths.')
elif isinstance(inputs_formats, (list, tuple)):
num_classes = len(inputs_formats)
targets_formats = [targets_formats] * num_classes
elif isinstance(targets_formats, (list, tuple)):
num_classes = len(targets_formats)
inputs_formats = [inputs_formats] * num_classes
else:
raise ValueError(
'One of the inputs_formats and targets_formats has to '
f'be a list or tuple, inputs_formats: {inputs_formats}, '
f'target_formats: {targets_formats}.')
def _format_str(features, fmt):
keys = set(re.findall(r'{(\S+)}', fmt))
s = fmt
for k in keys:
value = features
for subkey in k.split('/'):
value = value[subkey]
if not isinstance(value, tf.Tensor):
raise ValueError(
f'Final value of key \'{k}\' must be a tf.string. '
f'Got: {type(value).__name__}')
tf.debugging.assert_type(
value, tf.string,
f'Final value of key \'{k}\' must be a tf.string. '
f'Got: {value.dtype.name}')
s = tf.strings.regex_replace(s, '{%s}' % k, value)
return s
def _apply_formats(features, fmts):
return [_format_str(features, fmt) for fmt in fmts]
def _is_correct_fn(ex):
labels = ex[label_key]
is_correct = tf.one_hot(labels, num_classes, on_value=True, off_value=False)
if labels.shape.rank:
is_correct = tf.math.reduce_any(is_correct, axis=0)
return is_correct
def _weight_fn(ex):
return ex[weight_key]
return rank_classification(
ds,
inputs_fn=functools.partial(_apply_formats, fmts=inputs_formats),
targets_fn=functools.partial(_apply_formats, fmts=targets_formats),
is_correct_fn=_is_correct_fn,
weight_fn=None if weight_key is None else _weight_fn,
mode=mode)
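# Illustrative usage sketch for a COPA-style task, following the docstring
# example above (the raw feature names are assumptions about the dataset):
#
#   ds = rank_classification_formatter(
#       ds,
#       inputs_formats='{premise} What is the {question}? X',
#       targets_formats=['I think {choice1}.', 'I think {choice2}.'],
#       mode='eval',
#       label_key='label')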
@seqio.map_over_dataset
def parse_tsv(line, field_names=None, field_delim='\t'):
"""Splits TSV lines into dict examples mapping field name to string value.
Args:
line: an example containing a comma/tab-delimited string.
field_names: a list of strings, the ordered names of the TSV fields.
Defaults to "inputs" and "targets".
field_delim: a string, the delimiter to split on e.g. ',' for csv.
Returns:
A feature dict mapping field name to string value.
"""
field_names = field_names or ['inputs', 'targets']
return dict(
zip(field_names,
tf.io.decode_csv(
line,
record_defaults=[''] * len(field_names),
field_delim=field_delim,
use_quote_delim=False)))
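# Illustrative usage sketch (assumes `lines_ds` is a tf.data.Dataset of
# tab-delimited strings such as 'some input\tsome target'):
#
#   ds = parse_tsv(lines_ds)
#   # -> {'inputs': 'some input', 'targets': 'some target'}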
@seqio.map_over_dataset
def preprocess_tsv(line,
field_delim='\t',
num_fields=2,
inputs_format='{0}',
targets_format='{1}',
field_names=None):
r"""Parse tab-delimited strings into inputs and targets.
This function takes a tf.data.Dataset of strings, each of which contains
tab-delimited fields. The function returns a tf.data.Dataset of feature
dictionaries of the form {"inputs": string, "targets": string}.
inputs_format contains a template string and field numbers or names used to
produce the "inputs" string.
targets_format contains a template string and field numbers or names used to
produce the "targets" string.
Example (field numbers):
The input dataset contains the lines:
"6,7,42"
"2,9,18"
preprocess_tsv(dataset,
field_delim=',',
inputs_format='numerator: {2} denominator: {1}',
targets_format='quotient: {0}')
would produce a dataset containing the dictionaries:
{"inputs": "numerator: 42 denomnator: 7", "targets": "quotient: 6"}
{"inputs": "numerator: 18 denomnator: 9", "targets": "quotient: 2"}
Example (field names):
The input dataset contains the lines:
"6,7,42"
"2,9,18"
preprocess_tsv(dataset,
field_delim=',',
field_names=['quot', 'denom', 'numer'],
inputs_format='numerator: {numer} denominator: {denom}',
targets_format='quotient: {quot}')
would produce a dataset containing the dictionaries:
{"inputs": "numerator: 42 denominator: 7", "targets": "quotient: 6"}
{"inputs": "numerator: 18 denominator: 9", "targets": "quotient: 2"}
Args:
line: an example containing comma/tab-delimited string.
field_delim: a string, the delimiter to split on e.g. ',' for csv.
num_fields: an integer, the number of fields per line; used when field_names
is None.
inputs_format: a string, the desired output format with placeholders for
field values.
targets_format: a string, the desired output format with placeholders for
field values.
field_names: a list of strings, the ordered names of the TSV fields.
defaults to None (i.e. use field number in *_format)
Returns:
A feature dict with 'inputs' and 'targets' features.
"""
def _format_part_with_field_numbers(part, field_values):
found = re.findall(r'{(\d)}', part)
if found:
return field_values[int(found[0])]
else:
return part
def _format_part_with_field_names(part, field_names, field_values):
field_names_re = '|'.join(['{{({})}}'.format(x) for x in field_names])
found = re.findall(field_names_re, part)
if found:
pos = field_names.index(''.join(found[0]))
return field_values[int(pos)]
else:
return part
def _format(format_string, field_names, field_values):
if field_names is None:
parts = [
_format_part_with_field_numbers(p, field_values)
for p in re.split(r'({\d})', format_string)
]
else:
field_names_re = '(' + '|'.join(['{{{}}}'.format(x) for x in field_names
]) + ')'
parts = [
_format_part_with_field_names(p, field_names, field_values)
for p in re.split(field_names_re, format_string)
]
return tf.strings.join(parts)
field_values = tf.io.decode_csv(
line,
record_defaults=[''] *
(num_fields if field_names is None else len(field_names)),
field_delim=field_delim,
use_quote_delim=False)
return {
'inputs': _format(inputs_format, field_names, field_values),
'targets': _format(targets_format, field_names, field_values)
}
# ======================Token Preprocessors=====================================
# TODO(adarob): Add a test.
def span_corruption(dataset,
sequence_length,
output_features,
mean_noise_span_length=3.0,
noise_density=0.15,
input_feature_key='inputs',
merge_examples_to_reduce_padding=True,
reserved_for_packing=None):
"""Final pretraining objective used in Raffel et al., 2019.
Args:
dataset: A tf.data.Dataset with dictionaries containing the key
`input_feature_key`.
sequence_length: dict mapping of feature key to int length for that feature.
output_features: mapping of keys to features.
mean_noise_span_length: the mean number of tokens per masked span per
example.
noise_density: what fraction of the tokens to mask.
input_feature_key: which feature to use from the dataset as the input text
tokens.
merge_examples_to_reduce_padding: if True, combines multiple input examples
to reduce padding.
reserved_for_packing: if specified, reduces the desired inputs length by the
specified amount to enable multiple examples to be packed together
downstream.
Returns:
a dataset
"""
inputs_length = sequence_length[input_feature_key]
if reserved_for_packing:
inputs_length -= reserved_for_packing
input_length, targets_length = random_spans_helper(
extra_tokens_per_span_inputs=1,
extra_tokens_per_span_targets=1,
inputs_length=inputs_length,
mean_noise_span_length=mean_noise_span_length,
noise_density=noise_density)
if sequence_length['targets'] < targets_length:
raise ValueError(
f'Expected targets length for span corruption ({targets_length}) is '
f'greater than configured targets length '
f"({sequence_length['targets']})")
ds = dataset
ds = select_random_chunk(
ds,
output_features=output_features,
feature_key='targets',
max_length=65536)
if merge_examples_to_reduce_padding:
ds = reduce_concat_tokens(ds, feature_key='targets', batch_size=128)
ds = split_tokens(
ds,
feature_key='targets',
min_tokens_per_segment=None,
max_tokens_per_segment=input_length)
ds = denoise(
ds,
output_features,
inputs_fn=noise_span_to_unique_sentinel,
targets_fn=nonnoise_span_to_unique_sentinel,
noise_density=noise_density,
noise_mask_fn=functools.partial(
random_spans_noise_mask,
mean_noise_span_length=mean_noise_span_length),
input_feature_key=input_feature_key)
return ds
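# Illustrative usage sketch (a minimal configuration, not the canonical task
# setup): span_corruption is applied after tokenization, e.g.
#
#   ds = span_corruption(
#       tokenized_ds,
#       sequence_length={'inputs': 512, 'targets': 114},
#       output_features=output_features,
#       mean_noise_span_length=3.0,
#       noise_density=0.15)
#
# With these settings, random_spans_helper (defined below) picks raw chunk and
# targets lengths that avoid padding.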
# TODO(adarob): Add a test.
def iid_denoising(dataset, sequence_length, output_features):
"""Baseline pretraining objective used in Raffel et al., 2019."""
ds = dataset
ds = select_random_chunk(ds, output_features=output_features,
feature_key='targets', max_length=65536)
ds = reduce_concat_tokens(ds, feature_key='targets', batch_size=128)
ds = split_tokens_to_inputs_length(ds, output_features=output_features,
sequence_length=sequence_length)
ds = denoise(
ds,
output_features,
inputs_fn=noise_span_to_unique_sentinel,
targets_fn=nonnoise_span_to_unique_sentinel,
noise_density=0.15,
noise_mask_fn=iid_noise_mask
)
return ds
def prefix_lm(dataset, sequence_length, output_features):
"""Prefix language modeling objective used in Raffel et al. 2019."""
ds = dataset
ds = select_random_chunk(ds, output_features=output_features,
feature_key='targets', max_length=65536)
ds = split_tokens_to_inputs_length(ds, output_features=output_features,
sequence_length=sequence_length)
ds = denoise(
ds,
output_features,
inputs_fn=drop_nonnoise_tokens,
targets_fn=drop_noise_tokens,
noise_density=0.5,
noise_mask_fn=random_prefix_noise_mask,
)
return ds
def full_lm(dataset, sequence_length, output_features):
"""Full language modeling objective with EOS only at document boundaries."""
ds = dataset
ds = select_random_chunk(ds, output_features=output_features,
feature_key='targets', max_length=65536)
ds = seqio.preprocessors.append_eos(ds, output_features)
ds = reduce_concat_tokens(ds, feature_key='targets', batch_size=128)
# Don't use `split_tokens_to_targets_length` since we've already added EOS.
ds = split_tokens(ds, max_tokens_per_segment=sequence_length['targets'])
return ds
@gin.configurable
def select_random_chunk(dataset: tf.data.Dataset,
output_features: Mapping[str, seqio.Feature],
max_length: Optional[int] = None,
feature_key: str = 'targets',
additional_feature_keys: Optional[Sequence[str]] = None,
passthrough_feature_keys: Optional[
Sequence[str]] = None,
sequence_length: Optional[Mapping[str, int]] = None,
uniform_random_start: bool = False,
min_length: Optional[int] = None,
**unused_kwargs) -> tf.data.Dataset:
"""Token-preprocessor to extract one span of at most `max_length` tokens.
If the token sequence is longer than `max_length`, then we return a random
subsequence. Otherwise, we return the full sequence.
This is generally followed by split_tokens.
Args:
dataset: A tf.data.Dataset with dictionaries containing the key feature_key.
output_features: Mapping of keys to features.
max_length: Typically specified in gin configs, takes priority over
sequence_length.
feature_key: Which feature to use from the dataset.
additional_feature_keys: Additional features to use. The same chunk will be
selected from these features as from the one specified in feature_key,
so they should all have the same length.
passthrough_feature_keys: Additional keys to pass through unchanged.
sequence_length: Used if max_length is not specified. Typically passed in
by the data pipeline. feature_key will be used to select the length.
uniform_random_start: If True, will select a starting point in
[-max_length + 1, n_tokens). If False, will select one of a set of chunks
offset by max_length. Both of these starting points try to ensure each
token has an equal probability of being included.
min_length: If specified, lengths of chunks will be selected uniformly at
random from [min_length, max_length]. Note that chunks can end up shorter
than min_length when they fall at the beginning or end of the sequence.
Returns:
a dataset
"""
if passthrough_feature_keys:
chunk_keys = set([feature_key] + (additional_feature_keys or []))
overlap_keys = chunk_keys & set(passthrough_feature_keys)
if overlap_keys:
raise ValueError(
f'chunk keys {overlap_keys} also included in passthrough keys')
if max_length is None and sequence_length is not None:
max_length = sequence_length[feature_key]
if output_features[feature_key].add_eos:
# Leave room to insert an EOS token.
max_length -= 1
if max_length is None:
raise ValueError('Must specify max_length or sequence_length.')
@seqio.map_over_dataset(num_seeds=2)
def _my_fn(x, seeds):
"""Select a random chunk of tokens.
Args:
x: a 1d Tensor
seeds: an int32 Tensor, shaped (2, 2), the random seeds.
Returns:
a 1d Tensor
"""
tokens = x[feature_key]
n_tokens = tf.shape(tokens)[0]
if min_length is not None:
length = tf.random.stateless_uniform(
[],
minval=min_length,
maxval=max_length,
dtype=tf.int32,
seed=seeds[0])
else:
length = max_length
if uniform_random_start:
start = tf.random.stateless_uniform(
[],
minval=-length + 1, # pylint:disable=invalid-unary-operand-type
maxval=n_tokens,
dtype=tf.int32,
seed=seeds[1])
end = tf.minimum(start + length, n_tokens)
start = tf.maximum(start, 0)
else:
num_segments = tf.cast(
tf.math.ceil(
tf.cast(n_tokens, tf.float32) / tf.cast(length, tf.float32)
),
tf.int32)
start = length * tf.random.stateless_uniform(
[],
maxval=num_segments,
dtype=tf.int32,
seed=seeds[1])
end = tf.minimum(start + length, n_tokens)
chunk = {feature_key: tokens[start:end]}
if additional_feature_keys is not None:
for k in additional_feature_keys:
with tf.control_dependencies([
tf.assert_equal(
tf.shape(tokens)[0],
tf.shape(x[k])[0],
message=(f'Additional feature {k} is not the same size as '
f'{feature_key} along axis 0 in select_random_chunk().'
)
)
]):
chunk[k] = x[k][start:end]
if passthrough_feature_keys is not None:
for k in passthrough_feature_keys:
chunk[k] = x[k]
return chunk
# Filter empty examples.
dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
return _my_fn(dataset)
@gin.configurable
def reduce_concat_tokens(dataset,
feature_key='targets',
batch_size=128,
**unused_kwargs):
"""Token-preprocessor to concatenate multiple unrelated documents.
If we want to generate examples of exactly the right length
(to avoid wasting space on padding), then we use this function, followed by
split_tokens.
Args:
dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
feature_key: a string
batch_size: an integer - how many documents to concatenate into one
Returns:
a dataset
"""
dataset = dataset.map(
lambda x: {feature_key: x[feature_key]}, num_parallel_calls=AUTOTUNE)
dataset = dataset.padded_batch(batch_size, padded_shapes={feature_key: [-1]})
def _my_fn(x):
tokens = tf.reshape(x[feature_key], [-1])
# strip padding
tokens = tf.boolean_mask(tokens, tf.cast(tokens, tf.bool))
return {feature_key: tokens}
return dataset.map(_my_fn, num_parallel_calls=AUTOTUNE)
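# Worked example (illustrative): with batch_size=3, the three token sequences
#   [3, 1, 4], [1, 5], [9, 2, 6, 5]
# are padded into one batch, flattened, and stripped of zero padding, yielding
# a single example {'targets': [3, 1, 4, 1, 5, 9, 2, 6, 5]}. Note that any
# genuine 0 tokens would also be stripped, so 0 is assumed to be padding.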
@seqio.map_over_dataset
def trim_tokens_at_front(x,
sequence_length,
keys_to_trim=None,
**unused_kwargs):
"""Token-preprocessor to trim sequence at the beginning.
Args:
x: an example with dictionaries containing keys_to_trim.
sequence_length: a dict of ints.
keys_to_trim: a list of feature keys.
Returns:
A preprocessed example.
"""
for key in (keys_to_trim or sequence_length.keys()):
if key in x:
# trim tokens, leaving room for EOS which gets added later
x[key] = x[key][-(sequence_length[key] - 1):]
return x
def trivia_qa_truncate_inputs(dataset, output_features, sequence_length):
"""Token preprocessor for the trivia QA dataset to truncate inputs.
This function takes a dataset containing "targets" and "inputs". It searches
for the "targets" in the "inputs" and truncates the "inputs" to
`sequence_length` while ensuring that the "targets" are present in the
"inputs". The function will randomly select a subset of "inputs".
If "targets" are not found in the "inputs", then the example is
dropped from the dataset.
E.g.
Input dataset
{
"inputs": [0, 3, 5, 7, 9, 11, 13, 15, 17, 18]
"targets": [5, 7, 9]
}
Output dataset (assuming sequence_length['inputs'] = 4)
{
"inputs": [3, 5, 7, 9]
"targets": [5, 7, 9]
}
or
{
"inputs": [5, 7, 9, 11]
"targets": [5, 7, 9]
}
Args:
dataset: a tf.data.Dataset with dictionaries containing the "inputs" and
"targets".
output_features: unused by this function.
sequence_length: a dict, with keys as "inputs" and "targets" indicating the
maximum number of tokens in each of the sequences.
Returns:
a dataset
"""
del output_features
@seqio.map_over_dataset(num_seeds=1)
def my_fn(features, seed):
"""Function to map original dataset to the new dataset."""
inputs = features['inputs']
targets = features['targets']
ans_len = tf.shape(targets)[0]
max_input_tokens = sequence_length['inputs']
def truncate_inputs():
"""Helper function to truncate the inputs."""
def answer_in_context(context, answer):
"""Helper function that checks if the answer is present in the context.
Args:
context: Tensor, tokenized representation of the context
answer: Tensor, tokenized representation of the answer
Returns:
result: boolean, indicates if the answer was present in the context.
pos_mask: boolean mask, a mask for every possible start position of
the answer in the context. Indicates whether the answer starts at
the particular position.
"""
conv_inp = tf.reshape(tf.cast(context, tf.float32), [1, -1, 1])
ans_len = tf.shape(answer)[0]
filters = tf.eye(ans_len, dtype=tf.float32)
# Assume context len is N and answer len is M.
# Use a convolution to create a matrix of (N-M) x M elements where
# each row of the matrix is a sequence of len M. This matrix contains
# all possible contiguous sequences of length M from the context.
# Every row of this matrix is compared with the answer to check if the
# answer exists in the context.
strided = tf.nn.conv1d(conv_inp,
tf.reshape(filters, [ans_len, 1, ans_len]), 1,
'VALID')
strided = tf.cast(strided[0], answer.dtype)
pos_mask = tf.reduce_all(
tf.equal(strided, tf.reshape(answer, [1, -1])), 1)
result = tf.reduce_any(pos_mask)
return result, pos_mask
def slice_inputs(inputs, answer_len, pos_mask, seed=None):
"""Helper function to slice inputs while keeping the answer."""
ans_start_pos = tf.cast(tf.where(pos_mask)[0][0], tf.int32)
inputs_len = tf.shape(inputs)[0]
start_range_min = tf.maximum(
0, ans_start_pos - (max_input_tokens - answer_len))
start_range_max = tf.minimum(ans_start_pos,
inputs_len - max_input_tokens) + 1
start_pos = tf.random.stateless_uniform(
[],
minval=start_range_min,
maxval=start_range_max,
dtype=tf.int32,
seed=seed)
return inputs[start_pos:start_pos + max_input_tokens]
result, pos_mask = answer_in_context(inputs, targets)
if result:
return slice_inputs(inputs, ans_len, pos_mask, seed=seed)
else:
return tf.constant([], dtype=inputs.dtype)
if tf.greater(tf.shape(inputs)[0], max_input_tokens):
inputs = truncate_inputs()
return {'inputs': inputs, 'targets': features['targets']}
dataset = my_fn(dataset)
return dataset.filter(lambda x: tf.size(x['inputs']) > 0)
@gin.configurable()
def unsupervised(dataset,
preprocessors=None,
output_features=None,
sequence_length=None):
"""Configure this to point at unsupervised preprocessors.
This function creates an extra level of indirection in case we want
different unsupervised pretraining functions in the future which do not
fit into the denoise() framework.
This function should be used as a post-cache preprocessing function.
Args:
dataset: A tf.data.Dataset to process.
preprocessors: a list of token-preprocessor functions. These functions
should take unused kwargs if output_features or sequence_length is not
used.
output_features: dict(str, Feature), output features of the Task to be
passed to the model.
sequence_length: dict mapping feature key to int length for that feature.
Returns:
A preprocessed tf.data.Dataset.
"""
if preprocessors is None:
logging.warning(
'unsupervised preprocessor got preprocessors=None; no preprocessing '
'will be applied.'
)
return dataset
kwargs = {}
if output_features:
kwargs['output_features'] = output_features
if sequence_length:
kwargs['sequence_length'] = sequence_length
for p in preprocessors:
dataset = p(dataset, **kwargs)
return dataset
# ======================== split_tokens and helpers ============================
@gin.configurable
def split_tokens(dataset: tf.data.Dataset,
min_tokens_per_segment: Optional[int] = None,
max_tokens_per_segment: int = gin.REQUIRED,
feature_key: str = 'targets',
additional_feature_keys: Optional[Sequence[str]] = None,
passthrough_feature_keys: Optional[Sequence[str]] = None,
num_parallel_calls: int = AUTOTUNE,
**unused_kwargs) -> tf.data.Dataset:
"""Split examples into multiple examples each.
The intended use case is to break up long examples for use in unsupervised
transfer-learning.
This function is generally preceded by select_random_chunk.
If min_tokens_per_segment is provided, the segment length is chosen randomly
per document from a log-uniform distribution. If min_tokens_per_segment is
None, then the segment length is max_tokens_per_segment (except for a possibly
shorter last segment in each document).
Args:
dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
min_tokens_per_segment: an optional integer
max_tokens_per_segment: an integer, the maximum number of tokens in each
segment. Only the final segment may be shorter.
feature_key: a string, the feature to split
additional_feature_keys: Additional features to split. The same chunk size
will be used, so they should be the same size as feature_key.
passthrough_feature_keys: Features to pass through without any splitting.
num_parallel_calls: num_parallel_calls value to pass to map_over_dataset
Returns:
a dataset
"""
if passthrough_feature_keys:
split_keys = set([feature_key] + (additional_feature_keys or []))
overlap_keys = split_keys & set(passthrough_feature_keys)
if overlap_keys:
raise ValueError(
f'split keys {overlap_keys} also included in passthrough keys')
@seqio.map_over_dataset(num_seeds=1, num_parallel_calls=num_parallel_calls)
def _split_tokens(x, seed):
"""Split one token sequence into multiple sequences."""
tokens = x[feature_key]
n_tokens = tf.shape(tokens)[0]
if min_tokens_per_segment is None:
length = max_tokens_per_segment
else:
# pick a length - log-uniformly distributed
length = tf.cast(
tf.exp(
tf.random.stateless_uniform(
[],
minval=math.log(min_tokens_per_segment),
maxval=math.log(max_tokens_per_segment),
seed=seed
)
),
tf.int32)
# Pad to a multiple of length, then use tf.reshape to split up the tokens
# into num_segments segments each of the given length.
num_segments = tf.cast(
tf.math.ceil(
tf.cast(n_tokens, tf.float32) / tf.cast(length, tf.float32))
,
tf.int32)
padding = num_segments * length - tf.shape(tokens)[0]
feature_keys_to_split = [feature_key]
orig_lengths = {}
outputs = {}
if additional_feature_keys is not None:
feature_keys_to_split.extend(additional_feature_keys)
for k in feature_keys_to_split:
with tf.control_dependencies([
tf.assert_equal(
tf.shape(tokens)[0],
tf.shape(x[k])[0],
message=(f'Additional feature {k} is not the same size as '
f'{feature_key} along axis 0 in split_tokens().')
)
]):
shape = tf.shape(x[k])[1:]
shape_list = x[k].shape[1:]
padded = tf.pad(
x[k],
tf.concat([[[0, padding]],
tf.zeros([len(shape_list), 2], dtype=tf.int32)],
axis=0))
orig_lengths[k] = tf.concat(
[tf.repeat(length, num_segments - 1), [length - padding]], axis=0)
outputs[k] = tf.reshape(
padded, tf.concat([[-1, length], shape], axis=0))
if passthrough_feature_keys:
for k in passthrough_feature_keys:
outputs[k] = tf.tile(
tf.expand_dims(x[k], axis=0),
tf.concat([[num_segments], tf.tile([1], [tf.rank(x[k])])], axis=0))
return outputs, orig_lengths
def _strip_padding(inputs, orig_lengths):
output = {}
for k, v in inputs.items():
if passthrough_feature_keys and k in passthrough_feature_keys:
output[k] = v
else:
output[k] = v[:orig_lengths[k]]
return output
# Filter empty examples.
dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
dataset = _split_tokens(dataset)
dataset = dataset.unbatch()
dataset = dataset.map(_strip_padding, num_parallel_calls=AUTOTUNE)
return dataset
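# Worked example (illustrative): with min_tokens_per_segment=None and
# max_tokens_per_segment=3, the single example
#   {'targets': [3, 1, 4, 1, 5, 9, 2]}
# is split into three examples
#   {'targets': [3, 1, 4]}, {'targets': [1, 5, 9]}, {'targets': [2]}
# with only the final segment shorter than the segment length.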
@gin.configurable
def split_tokens_to_inputs_length(dataset, sequence_length,
output_features, **kwargs):
max_tokens = sequence_length['inputs']
if output_features['inputs'].add_eos:
# Leave room to insert an EOS token.
max_tokens -= 1
return split_tokens(dataset, max_tokens_per_segment=max_tokens, **kwargs)
@gin.configurable
def split_tokens_to_targets_length(dataset, sequence_length,
output_features, **kwargs):
max_tokens = sequence_length['targets']
if output_features['targets'].add_eos:
# Leave room to insert an EOS token.
max_tokens -= 1
return split_tokens(dataset, max_tokens_per_segment=max_tokens, **kwargs)
@gin.configurable
def split_tokens_to_random_length(dataset, sequence_length,
output_features, **kwargs):
max_tokens = sequence_length['inputs']
if output_features['inputs'].add_eos:
# Leave room to insert an EOS token.
max_tokens -= 1
return split_tokens(dataset,
min_tokens_per_segment=8,
max_tokens_per_segment=max_tokens,
**kwargs)
@gin.configurable
def concatenate_and_split_to_fixed_length(dataset,
sequence_length,
output_features,
feature_key='targets',
**unused_kwargs):
"""Concatenate tokens across examples, then split to fixed-size chunks.
Chunk length is determined by sequence_length[feature_key].
Args:
dataset: a tf.data.Dataset
sequence_length: a dict of ints.
output_features: a dict mapping feature name to t5.data.Feature.
feature_key: a string
Returns:
a tf.data.Dataset
"""
dataset = dataset.map(lambda x: {feature_key: x[feature_key]})
max_tokens = sequence_length[feature_key]
if output_features[feature_key].add_eos:
# Leave room to insert an EOS token.
max_tokens -= 1
return dataset.unbatch().batch(max_tokens)
@gin.configurable
def filter_by_string_length(dataset,
feature_key='targets',
min_length=1,
max_length=1000000,
**unused_kwargs):
"""Filter examples by string length.
Args:
dataset: a tf.data.Dataset (not tokenized)
feature_key: a string
min_length: an integer
max_length: an integer
Returns:
a tf.data.Dataset
"""
def my_fn(x):
l = tf.strings.length(x[feature_key])
return tf.logical_and(tf.greater_equal(l, min_length),
tf.less_equal(l, max_length))
return dataset.filter(my_fn)
@gin.configurable
def random_spans_helper(inputs_length=gin.REQUIRED,
noise_density=gin.REQUIRED,
mean_noise_span_length=gin.REQUIRED,
extra_tokens_per_span_inputs=gin.REQUIRED,
extra_tokens_per_span_targets=gin.REQUIRED,
verbose=False):
"""Training parameters to avoid padding with random_spans_noise_mask.
When training a model with random_spans_noise_mask, we would like to set the
other training hyperparameters in a way that avoids padding. This function
helps us compute these hyperparameters.
We assume that each noise span in the input is replaced by
extra_tokens_per_span_inputs sentinel tokens, and each non-noise span in the
targets is replaced by extra_tokens_per_span_targets sentinel tokens.
This function tells us the required number of tokens in the raw example (for
split_tokens()) as well as the length of the encoded targets.
Note that this function assumes the inputs and targets will have EOS appended
and includes that in the reported length.
Args:
inputs_length: an integer - desired length of the tokenized inputs sequence
noise_density: a float
mean_noise_span_length: a float
extra_tokens_per_span_inputs: an integer
extra_tokens_per_span_targets: an integer
verbose: a bool indicating whether to log sequence lengths
Returns:
tokens_length: length of original text in tokens
targets_length: an integer - length in tokens of encoded targets sequence
"""
def _tokens_length_to_inputs_length_targets_length(tokens_length):
num_noise_tokens = int(round(tokens_length * noise_density))
num_nonnoise_tokens = tokens_length - num_noise_tokens
num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))
# inputs contain all nonnoise tokens, sentinels for all noise spans
# and one EOS token.
return (
num_nonnoise_tokens +
num_noise_spans * extra_tokens_per_span_inputs + 1,
num_noise_tokens +
num_noise_spans * extra_tokens_per_span_targets + 1)
tokens_length = inputs_length - 1
while (_tokens_length_to_inputs_length_targets_length(tokens_length + 1)[0]
<= inputs_length):
tokens_length += 1
inputs_length, targets_length = (
_tokens_length_to_inputs_length_targets_length(tokens_length))
# minor hack to get the targets length to be equal to inputs length
# which is more likely to have been set to a nice round number.
if noise_density == 0.5 and targets_length > inputs_length:
tokens_length -= 1
targets_length -= 1
if verbose:
logging.info(
'tokens_length=%s inputs_length=%s targets_length=%s '
'noise_density=%s mean_noise_span_length=%s ',
tokens_length, inputs_length, targets_length,
noise_density, mean_noise_span_length)
return tokens_length, targets_length
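# Worked example (illustrative): with inputs_length=512, noise_density=0.15,
# mean_noise_span_length=3.0 and one sentinel token per span on each side,
# this returns tokens_length=568 and targets_length=114:
#   568 raw tokens -> 85 noise tokens in 28 spans, so
#   inputs  = 483 non-noise tokens + 28 sentinels + 1 EOS = 512
#   targets =  85 noise tokens     + 28 sentinels + 1 EOS = 114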
@gin.configurable
def random_spans_tokens_length():
"""Helper for gin-configuring split_tokens with random_spans_noise_mask."""
return random_spans_helper()[0]
@gin.configurable
def random_spans_targets_length():
"""Helper for gin-configuring the targets sequence length."""
return random_spans_helper()[1]
# ========================== denoise and helpers ===============================
@gin.configurable()
def denoise(dataset,
output_features,
noise_density=gin.REQUIRED,
noise_mask_fn=gin.REQUIRED,
inputs_fn=gin.REQUIRED,
targets_fn=None,
passthrough_feature_keys: Optional[Sequence[str]] = None,
input_feature_key='inputs',
**unused_kwargs):
"""Gin-configurable token preprocessor for self-supervised denoising tasks.
This function takes a dataset containing "targets" sequences,
and turns each sequence into a dictionary containing:
{
"inputs": noisy version of the original sequence
"targets": the full original sequence or missing parts of original sequence
}
In particular, for each sequence, we choose a boolean noise_mask identifying
which tokens in the sequence to corrupt, as defined by the given
noise_mask_fn.
Given the sequence and the noise mask, we generate the inputs and targets
using the given inputs_fn and targets_fn respectively.
The self-supervised tasks vary along these axes:
- noise_density: What fraction of the tokens to select as noise
- noise_mask_fn: What pattern should the noise mask follow
(iid, regular segments, etc.)
- inputs_fn: How to apply the noise
(drop noise tokens, replace with sentinels, etc.)
- targets_fn: How to represent the output
(full sequence, only non-noise tokens, etc.)
Note: Some functionality has been deleted, which we may or may not want to
restore at a later date. The code for this functionality can be found in
the deleted code for this CL. In particular:
- mixture of masking and random replacement
- task labels prepended to the inputs
Args:
dataset: A tf.data.Dataset to process.
output_features: a dict mapping feature name to t5.data.Feature.
noise_density: a float
noise_mask_fn: a function from (length, noise_density) -> boolean mask
inputs_fn: a function from (tokens, noise_mask, vocabulary) -> tokens
targets_fn: a function from (tokens, noise_mask, vocabulary) -> tokens
passthrough_feature_keys: names of additional features to include in output
input_feature_key: name of feature to use as inputs
Returns:
A preprocessed tf.data.Dataset.
"""
if passthrough_feature_keys and (input_feature_key in passthrough_feature_keys
or 'targets' in passthrough_feature_keys):
raise ValueError(
f"passthrough keys cannot contain '{input_feature_key}' or 'targets'")
@seqio.map_over_dataset(num_seeds=6)
def my_fn(features, seeds):
"""Map function."""
tokens = features['targets']
vocabulary = output_features['targets'].vocabulary
if (input_feature_key in output_features and
vocabulary != output_features[input_feature_key].vocabulary):
raise ValueError(
'denoise creates inputs based on tokenized targets but was applied '
'to a task that uses different vocabularies for inputs and targets.')
noise_mask = noise_mask_fn(tf.size(tokens), noise_density, seeds=seeds[:2])
inputs = inputs_fn(tokens, noise_mask, vocabulary, seeds=seeds[2:4])
if targets_fn:
targets = targets_fn(tokens, noise_mask, vocabulary, seeds=seeds[4:6])
else:
targets = tokens
return {
input_feature_key: inputs,
'targets': targets,
**{
k: features[k]
for k in features
if passthrough_feature_keys and k in passthrough_feature_keys
}
}
return my_fn(dataset)
@gin.configurable()
def iid_noise_mask(length, noise_density, seeds):
"""Independent and identically distributed token noise.
Args:
length: an int32 scalar.
noise_density: a float - approximate density of output mask.
seeds: an int32 Tensor, shaped (1, 2), the random seed.
Returns:
a boolean tensor with shape [length].
"""
return tf.random.stateless_uniform([length], seed=seeds[0]) < noise_density
@gin.configurable()
def regular_noise_mask(length,
noise_density,
seeds,
min_span_length=1,
max_span_length=5):
"""Noise mask consisting of equally spaced spans of equal length.
The span length and the offset are chosen randomly per-example.
The beginning and end of the sequence may be part of shorter spans of noise.
For example, if noise_density=0.25 and a span length of 2 is chosen,
then the output might be:
[T F F F F F F T T F F F F F F T T F F F F F F T T F F]
Args:
length: an int32 scalar.
noise_density: a float - approximate density of output mask.
seeds: an int32 Tensor, shaped (2, 2), the random seeds.
min_span_length: an integer.
max_span_length: an integer.
Returns:
a boolean tensor with shape [length].
"""
span_length = tf.random.stateless_uniform(
[],
minval=min_span_length,
maxval=max_span_length + 1,
dtype=tf.int32,
seed=seeds[0])
period = tf.cast(
tf.round(tf.cast(span_length, tf.float32) / noise_density), tf.int32)
offset = tf.random.stateless_uniform(
[],
maxval=period,
dtype=tf.int32,
seed=seeds[1])
return (tf.range(length, dtype=tf.int32) + offset) % period < span_length
@gin.configurable()
def random_spans_noise_mask(length,
noise_density,
seeds,
mean_noise_span_length=3.0):
"""Noise mask consisting of random spans of noise tokens.
The number of noise tokens and the number of noise spans and non-noise spans
are determined deterministically as follows:
num_noise_tokens = round(length * noise_density)
num_nonnoise_spans = num_noise_spans = round(
num_noise_tokens / mean_noise_span_length)
Spans alternate between non-noise and noise, beginning with non-noise.
Subject to the above restrictions, all masks are equally likely.
Args:
length: an int32 scalar (length of the incoming token sequence)
noise_density: a float - approximate density of output mask
seeds: an int32 Tensor, shaped (2, 2)
mean_noise_span_length: a number
Returns:
a boolean tensor with shape [length]
"""
orig_length = length
# increase length to avoid degeneracy
length = tf.maximum(length, 2)
def to_int(x):
return tf.cast(x, tf.int32)
def to_float(x):
return tf.cast(x, tf.float32)
num_noise_tokens = to_int(tf.round(to_float(length) * noise_density))
# avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
num_noise_tokens = tf.minimum(tf.maximum(num_noise_tokens, 1), length - 1)
num_noise_spans = to_int(
tf.round(to_float(num_noise_tokens) / mean_noise_span_length))
# avoid degeneracy by ensuring positive number of noise spans
num_noise_spans = tf.maximum(num_noise_spans, 1)
num_nonnoise_tokens = length - num_noise_tokens
# pick the lengths of the noise spans and the non-noise spans
def _random_segmentation(num_items, num_segments, seed):
"""Partition a sequence of items randomly into non-empty segments.
Args:
num_items: an integer scalar > 0
num_segments: an integer scalar in [1, num_items]
seed: an integer seed
Returns:
a Tensor with shape [num_segments] containing positive integers that add
up to num_items
"""
first_in_segment = tf.pad(
seqio.stateless_shuffle(
to_int(tf.range(num_items - 1) < num_segments - 1),
seed),
[[1, 0]])
segment_id = tf.cumsum(first_in_segment)
segment_length = tf.math.segment_sum(tf.ones_like(segment_id), segment_id)
return segment_length
noise_span_lengths = _random_segmentation(
num_noise_tokens, num_noise_spans, seeds[0])
nonnoise_span_lengths = _random_segmentation(
num_nonnoise_tokens, num_noise_spans, seeds[1])
interleaved_span_lengths = tf.reshape(
tf.stack([nonnoise_span_lengths, noise_span_lengths], axis=1),
[num_noise_spans * 2])
span_starts = tf.cumsum(interleaved_span_lengths)[:-1]
span_start_indicator = tf.math.unsorted_segment_sum(
tf.ones_like(span_starts), span_starts, length)
span_num = tf.cumsum(span_start_indicator)
is_noise = tf.equal(span_num % 2, 1)
return is_noise[:orig_length]
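# Worked example (illustrative): with length=12, noise_density=0.5 and
# mean_noise_span_length=3.0, we get num_noise_tokens=6 and num_noise_spans=2.
# The 6 noise and 6 non-noise tokens are each randomly partitioned into two
# spans and interleaved starting with a non-noise span, e.g.
#   [F, F, T, T, T, F, F, F, F, T, T, T]
# (non-noise spans of lengths 2 and 4, noise spans of lengths 3 and 3).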
@gin.configurable()
def random_prefix_noise_mask(length, noise_density, seeds):
"""First part of the sequence is noise (for prefix_lm).
The length of the prefix is chosen uniformly between [1, length)
noise_density must be 0.5.
TODO(noam): figure out some distribution to use if noise_density != 0.5.
Args:
length: an int32 scalar.
noise_density: a float - must equal 0.5.
seeds: an int32 Tensor, shaped (1, 2), the random seed.
Returns:
a boolean tensor with shape [length].
"""
if noise_density != 0.5:
raise NotImplementedError(
'noise density must equal 0.5 for random_prefix_noise_mask')
max_input_tokens = length - 1
min_input_tokens = tf.minimum(max_input_tokens, 1)
num_input_tokens = tf.random.stateless_uniform(
[],
minval=min_input_tokens,
maxval=max_input_tokens + 1,
dtype=tf.int32,
seed=seeds[0])
return tf.range(length, dtype=tf.int32) < num_input_tokens
@gin.configurable()
def sentinel_id(vocabulary, return_value=None):
"""Token ID to use as a sentinel.
By default, we use the last token in the vocabulary.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
return_value: an optional integer
Returns:
an integer
"""
if return_value is not None:
return return_value
return vocabulary.vocab_size - 1
@gin.configurable()
def noise_token_to_sentinel(tokens, noise_mask, vocabulary, seeds):
"""Replace each noise token with the given sentinel.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: a vocabulary.Vocabulary
seeds: an unused int32 Tensor
Returns:
a Tensor with the same shape and dtype as tokens
"""
del seeds
return tf.where(noise_mask,
tf.cast(sentinel_id(vocabulary), tokens.dtype),
tokens)
@gin.configurable()
def noise_span_to_sentinel(tokens, noise_mask, vocabulary, seeds):
"""Replace each run of consecutive noise tokens with a single sentinel.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: a vocabulary.Vocabulary
seeds: an unused int32 Tensor
Returns:
a Tensor with the same shape and dtype as tokens
"""
del seeds
tokens = tf.where(noise_mask,
tf.cast(sentinel_id(vocabulary), tokens.dtype),
tokens)
prev_token_is_noise = tf.pad(noise_mask[:-1], [[1, 0]])
subsequent_noise_tokens = tf.logical_and(noise_mask, prev_token_is_noise)
return tf.boolean_mask(tokens, tf.logical_not(subsequent_noise_tokens))
@gin.configurable()
def nonnoise_span_to_sentinel(tokens, noise_mask, vocabulary, seeds):
return noise_span_to_sentinel(
tokens, tf.logical_not(noise_mask), vocabulary, seeds)
@gin.configurable()
def noise_span_to_unique_sentinel(tokens, noise_mask, vocabulary, seeds):
"""Replace each run of consecutive noise tokens with a different sentinel.
The idea here is to be able to align the dropped spans in the inputs
with the markers in the targets.
We want to generate training examples like
"We hold X to be Y that" -> "X these truths Y self evident Z"
Sentinels are assigned in decreasing order within the sequence, starting at
vocabulary.vocab_size - 1. That is, we appropriate the last tokens in the
vocabulary for additional use as sentinels.
TODO(noam): we may want to try enlarging the vocabulary and leaving room
for the sentinels instead. However, this requires enlarging the embedding
tables in the model, so that is a bigger change.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: a vocabulary.Vocabulary
seeds: an unused int32 Tensor
Returns:
a Tensor with the same shape and dtype as tokens
"""
del seeds
prev_token_is_noise = tf.pad(noise_mask[:-1], [[1, 0]])
first_noise_tokens = tf.logical_and(
noise_mask, tf.logical_not(prev_token_is_noise))
subsequent_noise_tokens = tf.logical_and(noise_mask, prev_token_is_noise)
sentinel = sentinel_id(vocabulary) + 1 - tf.cumsum(
tf.cast(first_noise_tokens, tokens.dtype))
tokens = tf.where(first_noise_tokens, sentinel, tokens)
return tf.boolean_mask(tokens, tf.logical_not(subsequent_noise_tokens))
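# Worked example (illustrative): with vocabulary size V (so the first sentinel
# id is V - 1), given
#   tokens     = [10, 11, 12, 13, 14, 15]
#   noise_mask = [ F,  F,  T,  T,  F,  T]
# the first noise span (12, 13) collapses to sentinel V-1 and the second (15)
# to sentinel V-2, producing [10, 11, V-1, 14, V-2].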
@gin.configurable()
def nonnoise_span_to_unique_sentinel(tokens, noise_mask, vocabulary, seeds):
return noise_span_to_unique_sentinel(
tokens, tf.logical_not(noise_mask), vocabulary, seeds)
@gin.configurable()
def drop_noise_tokens(tokens, noise_mask, vocabulary, seeds):
"""Drop noise tokens without inserting a sentinel.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: an unused vocabulary.Vocabulary
seeds: an unused int32 Tensor
Returns:
a Tensor with the same shape and dtype as tokens
"""
del vocabulary
del seeds
return tf.boolean_mask(tokens, tf.logical_not(noise_mask))
@gin.configurable()
def drop_nonnoise_tokens(tokens, noise_mask, vocabulary, seeds):
"""Drop non-noise tokens without inserting a sentinel.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: an unused vocabulary.Vocabulary
seeds: an unused int32 Tensor
Returns:
a Tensor with the same shape and dtype as tokens
"""
del vocabulary
del seeds
return tf.boolean_mask(tokens, noise_mask)
@gin.configurable()
def permute_noise_tokens(tokens, noise_mask, vocabulary, seeds):
"""Permute the noise tokens, keeping the non-noise tokens where they are.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: an unused vocabulary.Vocabulary
seeds: an int32 Tensor, sized (1, 2)
Returns:
a Tensor with the same shape and dtype as tokens
"""
del vocabulary
masked_only = tf.boolean_mask(tokens, noise_mask)
permuted = seqio.stateless_shuffle(masked_only, seeds[0])
# pad to avoid errors when it has size 0
permuted = tf.pad(permuted, [[0, 1]])
indices = tf.cumsum(tf.cast(noise_mask, tf.int32), exclusive=True)
return tf.where(noise_mask,
tf.gather(permuted, indices),
tokens)
@gin.configurable()
def noise_token_to_gathered_token(tokens, noise_mask, vocabulary, seeds):
"""Replace each noise token with a random token from the sequence.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: an unused vocabulary.Vocabulary
seeds: an int32 Tensor, sized (1, 2)
Returns:
a Tensor with the same shape and dtype as tokens
"""
del vocabulary
indices = tf.random.stateless_uniform(
shape=tf.shape(tokens),
maxval=tf.size(tokens),
dtype=tf.int32,
seed=seeds[0])
return tf.where(noise_mask,
tf.gather(tokens, indices),
tokens)
@gin.configurable()
def noise_token_to_random_token(
tokens,
noise_mask,
vocabulary,
seeds,
num_reserved_tokens=3):
"""Replace each noise token with a random token from the vocabulary.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: a vocabulary.Vocabulary
seeds: an int32 Tensor, shaped (1, 2)
num_reserved_tokens: an integer
Returns:
a Tensor with the same shape and dtype as tokens
"""
return tf.where(noise_mask,
tf.random.stateless_uniform(
tf.shape(tokens),
minval=num_reserved_tokens,
maxval=vocabulary.vocab_size,
dtype=tokens.dtype,
seed=seeds[0]),
tokens)
@gin.configurable()
def noise_token_to_random_token_or_sentinel(
tokens,
noise_mask,
vocabulary,
seeds,
random_prob=0.1):
"""Replace each noise token with a random token or a sentinel.
For each masked token, with probability random_prob, we replace it by a
random token from the vocabulary. Otherwise, we replace it with a sentinel.
Args:
tokens: a 1d integer Tensor
noise_mask: a boolean Tensor with the same shape as tokens
vocabulary: a vocabulary.Vocabulary
seeds: an int32 Tensor, shaped (2, 2).
random_prob: a float
Returns:
a Tensor with the same shape and dtype as tokens
"""
use_random = (
tf.random.stateless_uniform(tf.shape(tokens), seed=seeds[0]) <
random_prob)
return tf.where(
use_random,
noise_token_to_random_token(
tokens, noise_mask, vocabulary, seeds=seeds[1:]),
noise_token_to_sentinel(
tokens, noise_mask, vocabulary, seeds=()))
# =============== EXPERIMENTAL preprocessors (not used for the T5 paper) =======
def trim_and_pad_dataset(dataset, sequence_length):
"""A wrapper to use `seqio.utils.trim_and_pad_dataset` as a preprocessor."""
return seqio.utils.trim_and_pad_dataset(
dataset, feature_lengths=sequence_length)
def targets_for_prefix_lm_objective(dataset, sequence_length, output_features):
"""Prepares targets to be used for prefix LM objective."""
dataset = select_random_chunk(
dataset, output_features, max_length=65536, feature_key='targets')
dataset = seqio.preprocessors.append_eos(dataset, output_features)
dataset = reduce_concat_tokens(dataset, batch_size=128)
dataset = split_tokens(
dataset, max_tokens_per_segment=sequence_length['targets'])
dataset = trim_and_pad_dataset(dataset, sequence_length)
return dataset
def pack_prefix_lm_encoder_decoder(ds, sequence_length, pad_id=0):
"""Pack two examples into one with the prefix LM objective."""
packed_length = next(iter(sequence_length.values()))
assert packed_length % 2 == 0
assert all(l == packed_length for l in sequence_length.values())
@seqio.utils.map_over_dataset(num_seeds=1)
def pack_examples(example_pair, seed):
split_point = tf.random.stateless_uniform((),
minval=1,
maxval=packed_length,
seed=seed,
dtype=tf.int32)
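    # Assuming each element of example_pair['targets'] has length packed_length
    # (as produced by targets_for_prefix_lm_objective above), the two prefixes
    # below form a packed_length encoder input and the two complementary
    # suffixes form a packed_length decoder target.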
inputs = tf.concat([
example_pair['targets'][0][:split_point],
example_pair['targets'][1][:packed_length - split_point]
],
axis=0)
inputs = tf.reshape(inputs, (packed_length,))
targets = tf.concat([
example_pair['targets'][0][split_point:],
example_pair['targets'][1][packed_length - split_point:]
],
axis=0)
targets = tf.reshape(targets, (packed_length,))
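    # Segment ids record which original example each packed position came from
    # (1 for the first, 2 for the second) so attention cannot cross the packing
    # boundary.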
encoder_segment_ids = tf.cast(
tf.range(packed_length) >= split_point, tf.int32) + 1
decoder_segment_ids = tf.cast(
tf.range(packed_length) >= (packed_length - split_point), tf.int32) + 1
decoder_input_tokens = seqio.utils.make_autoregressive_inputs(
targets, sequence_id=decoder_segment_ids)
encoder_positions = tf.concat(
[tf.range(split_point),
tf.range(packed_length - split_point)], axis=0)
encoder_positions = tf.reshape(encoder_positions, (packed_length,))
decoder_positions = tf.concat(
[tf.range(packed_length - split_point),
tf.range(split_point)], axis=0)
decoder_positions = tf.reshape(decoder_positions, (packed_length,))
decoder_loss_weights = tf.cast(
tf.not_equal(targets, pad_id), dtype=tf.int32)
return {
'encoder_input_tokens': inputs,
'decoder_target_tokens': targets,
'decoder_input_tokens': decoder_input_tokens,
'encoder_segment_ids': encoder_segment_ids,
'encoder_positions': encoder_positions,
'decoder_segment_ids': decoder_segment_ids,
'decoder_positions': decoder_positions,
'decoder_loss_weights': decoder_loss_weights,
}
# Note that the batch requires the lengths to be the same.
return pack_examples(ds.batch(2))
def pack_prefix_lm_decoder_only(ds,
sequence_length,
loss_on_targets_only=True,
pad_id=0):
"""Randomly split the tokens for the prefix LM objective."""
packed_length = next(iter(sequence_length.values()))
assert packed_length % 2 == 0
assert all(l == packed_length for l in sequence_length.values())
@seqio.utils.map_over_dataset(num_seeds=1)
def pack_examples(example, seed):
split_point = tf.random.stateless_uniform((),
minval=1,
maxval=packed_length,
seed=seed,
dtype=tf.int32)
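    # split_point divides the sequence: with loss_on_targets_only=True the loss
    # covers positions at or after split_point, while decoder_causal_attention
    # marks positions up to and including split_point as the fully-visible
    # prefix region.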
decoder_target_tokens = example['targets']
decoder_input_tokens = seqio.utils.make_autoregressive_inputs(
decoder_target_tokens)
if loss_on_targets_only:
decoder_loss_weights = tf.cast(
tf.range(packed_length) >= split_point, tf.int32)
else:
decoder_loss_weights = tf.ones((packed_length,), dtype=tf.int32)
padding_mask = tf.cast(
tf.not_equal(decoder_target_tokens, pad_id), dtype=tf.int32)
decoder_loss_weights *= padding_mask
decoder_causal_attention = tf.cast(
tf.range(packed_length) <= split_point, tf.int32)
return {
'decoder_target_tokens': decoder_target_tokens,
'decoder_input_tokens': decoder_input_tokens,
'decoder_loss_weights': decoder_loss_weights,
'decoder_causal_attention': decoder_causal_attention,
}
return pack_examples(ds)
| google-research/text-to-text-transfer-transformer | t5/data/preprocessors.py | Python | apache-2.0 | 114,576 |
from numpy import absolute
from numpy import multiply
from numpy import where
from numpy import zeros
from gwlfe.Memoization import memoize
from gwlfe.enums import ETflag
# @memoize #TODO: adding memoization causes this function to not pass the tests
def DailyET(NYrs, DaysMonth, Temp, DayHrs, KV, PcntET, ETFlag):
result = zeros((NYrs, 12, 31))
# CALCULATE ET FROM SATURATED VAPOR PRESSURE,
# HAMON (1961) METHOD
for Y in range(NYrs):
for i in range(12):
for j in range(DaysMonth[Y][i]):
DailyTemp = Temp[Y][i][j]
if ETFlag is ETflag.HAMON_METHOD:
if DailyTemp > 0:
SatVaPressure = (33.8639 * ((0.00738 * DailyTemp +
0.8072) ** 8 - 0.000019 *
absolute(1.8 * DailyTemp + 48) +
0.001316))
PotenET = (0.021 * DayHrs[i] ** 2 * SatVaPressure / (DailyTemp + 273))
ET = KV[i] * PotenET * PcntET[i]
result[Y][i][j] = ET
return result
@memoize
def SatVaPressure(Temp):
return (33.8639 * ((0.00738 * Temp + 0.8072) ** 8 - 0.000019 * absolute(1.8 * Temp + 48) + 0.001316))
@memoize
def PotentET(DayHrs, Temp):
return multiply(0.021 * ((DayHrs ** 2).reshape(12, 1)), SatVaPressure(Temp)) / (Temp + 273)
@memoize
def DailyET_f(Temp, KV, PcntET, DayHrs):
return where(Temp > 0, multiply((KV * PcntET).reshape(12, 1), PotentET(DayHrs, Temp)), 0)
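# A sketch of how the vectorized helpers relate to DailyET (an assumption, not
# stated in the module): for a single year's (12, 31) temperature grid,
# DailyET_f(Temp[Y], KV, PcntET, DayHrs) should reproduce DailyET(...)[Y] for
# the Hamon method, with non-positive temperatures mapped to zero ET.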
| WikiWatershed/gwlf-e | gwlfe/Input/WaterBudget/ET.py | Python | apache-2.0 | 1,598 |
'''
Created on Jul 29, 2015
@author: Mikhail
'''
import json
import random
import string
import time
import os
__version__ = 2.0
class MyOwnJSONProcessing:
# variables for amount of objects in dictionary (for json)
min_len_of_json_dict = 1
max_len_of_json_dict = 5
# variables for max and min length of keys in dictionary (for json)
min_len_of_key = 1
max_len_of_key = 10
# variable for max value in dictionary (for json)
max_value = 100
@classmethod
def generate_set_of_files_with_json_obj(cls, amount_of_files, is_data_complicated = False):
for dummy_i in xrange(amount_of_files):
if not is_data_complicated:
# we will generate simple data for json file
cls.generate_json_file_with_data(data = cls.generate_data_for_json_obj())
else:
# lets try to generate more complicated data for json file
cls.generate_json_file_with_data(data = cls.generate_complicated_data_for_json_obj())
@classmethod
def generate_data_for_json_obj(cls):
json_data = {}
# generating random key
for dummy_i in range(random.randrange(cls.min_len_of_json_dict, cls.max_len_of_json_dict)):
new_key = cls.randomword(random.randrange(cls.min_len_of_key, cls.max_len_of_key))
new_value = random.randrange(cls.max_value)
if not json_data.has_key(new_key):
json_data[new_key] = new_value
return json_data
@classmethod
def generate_complicated_data_for_json_obj(cls):
raise NotImplementedError
@staticmethod
def generate_json_file_with_data(file_name_template = "data_<timestamp>.json", data = {}):
"""
        By default this function generates a JSON file whose name contains the
        time-stamp of when it was generated.
"""
file_name_id = 0
file_name = string.replace(file_name_template, '<timestamp>', str(time.time())) if (string.find(file_name_template, '<timestamp>') != -1) else file_name_template
while os.path.exists(file_name):
file_name_id += 1
file_name = string.replace(file_name_template, '<timestamp>', str(time.time())) if (string.find(file_name_template, '<timestamp>') != -1) else string.replace(file_name_template, ".", str(file_name_id) + ".")
# process the file
with open(file_name, 'w') as f:
json.dump(data, f, indent = 4)
print "File {} has been generated".format(file_name)
return file_name
@staticmethod
def load_data_from_json_file(file_name):
data = {}
with open(file_name, 'r') as f:
data = json.load(f)
return data
@staticmethod
def randomword(length):
return ''.join(random.choice(string.lowercase + string.digits) for dummy_i in range(length))
@staticmethod
def clean_up(dir_with_tests = ".", postfix = ".json"):
"""
        This function removes all files with the required postfix from the given folder (not from subfolders)
        @param dir_with_tests: directory where selected files should be removed
@param postfix: postfix for files that should be removed
"""
for name in os.listdir(dir_with_tests):
if name.endswith(postfix):
file_or_dir_name = os.path.join(dir_with_tests, name)
# we should process only files
if os.path.isfile(file_or_dir_name):
os.remove(file_or_dir_name)
print "File {} has been removed...".format(file_or_dir_name)
| MikeLaptev/sandbox_python | mera/unittest_example/json_file_generator.py | Python | apache-2.0 | 3,656 |
import json
from discord.ext import commands
import addons.checks
class Helper_list:
"""
Management of active helpers.
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
@addons.checks.is_staff("Owner")
@commands.command(pass_context=True)
async def addhelper(self, ctx, user, position):
"""Add user as a helper. Owners only."""
if position not in self.bot.helper_roles:
await self.bot.say("💢 That's not a valid position. You can use __{}__".format("__, __".join(self.bot.helper_roles.keys())))
return
member = ctx.message.mentions[0]
addons.checks.helpers[member.id] = position
with open("data/helpers.json", "w") as f:
json.dump(addons.checks.helpers, f)
await self.bot.add_roles(member, self.bot.helpers_role)
await self.bot.say("{} is now a helper. Welcome to the party room!".format(member.mention, position))
@addons.checks.is_staff("Owner")
@commands.command(pass_context=True)
async def delhelper(self, ctx, user):
"""Remove user from helpers. Owners only."""
member = ctx.message.mentions[0]
await self.bot.say(member.name)
addons.checks.helpers.pop(member.id, None)
with open("data/helpers.json", "w") as f:
json.dump(addons.checks.helpers, f)
await self.bot.remove_roles(member, self.bot.helpers_role, *self.bot.helper_roles.values())
await self.bot.say("{} is no longer a helper. Stop by some time!".format(member.mention))
@addons.checks.is_staff("Helper")
@commands.command(pass_context=True)
async def helpon(self, ctx):
"""Gain highlighted helping role. Only needed by Helpers."""
author = ctx.message.author
if author.id not in addons.checks.helpers:
await self.bot.say("You are not listed as a helper, and can't use this.")
return
await self.bot.add_roles(author, self.bot.helper_roles[addons.checks.helpers[author.id]])
await self.bot.say("{} is now actively helping.".format(author.mention))
msg = "🚑 **Elevated: +Help**: {} | {}#{}".format(author.mention, author.name, author.discriminator)
await self.bot.send_message(self.bot.modlogs_channel, msg)
@addons.checks.is_staff("Helper")
@commands.command(pass_context=True)
async def helpoff(self, ctx):
"""Remove highlighted helping role. Only needed by Helpers."""
author = ctx.message.author
if author.id not in addons.checks.helpers:
await self.bot.say("You are not listed as a helper, and can't use this.")
return
await self.bot.remove_roles(author, self.bot.helper_roles[addons.checks.helpers[author.id]])
await self.bot.say("{} is no longer actively helping!".format(author.mention))
msg = "👎🏻 **De-Elevated: -Help**: {} | {}#{}".format(author.mention, author.name, author.discriminator)
await self.bot.send_message(self.bot.modlogs_channel, msg)
def setup(bot):
bot.add_cog(Helper_list(bot))
| 916253/Kurisu | addons/helper_list.py | Python | apache-2.0 | 3,142 |
# Shared and common functions (declustering redundant code)
import numpy as np, os
import random, cv2
import operator
def get(link, save_as=False):
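    """Downloads the image at `link` into ./tmp and returns it as an RGB array."""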
import urllib
base_dir = './tmp'
assert type(link) == str, type(link)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
if save_as:
save_path = os.path.join(base_dir, save_as)
else:
save_path = os.path.join(base_dir, 'tmp.png')
urllib.urlretrieve(link, save_path)
im = cv2.imread(save_path)[:,:,[2,1,0]]
return im
def softmax(X, theta = 1.0, axis = None):
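    """Numerically stable softmax of X along `axis`, with logits scaled by `theta` first."""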
y = np.atleast_2d(X)
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
y = y * float(theta)
y = y - np.expand_dims(np.max(y, axis = axis), axis)
y = np.exp(y)
ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)
p = y / ax_sum
if len(X.shape) == 1: p = p.flatten()
return p
def sort_dict(d, sort_by='value'):
""" Sorts dictionary """
assert sort_by in ['value', 'key'], sort_by
if sort_by == 'key':
return sorted(d.items(), key=operator.itemgetter(0))
if sort_by == 'value':
return sorted(d.items(), key=operator.itemgetter(1))
def random_crop(im, crop_size, return_crop_loc=False):
""" Randomly crop """
h,w = np.shape(im)[:2]
hSt = random.randint(0, h - crop_size[0])
wSt = random.randint(0, w - crop_size[1])
patch = im[hSt:hSt+crop_size[0], wSt:wSt+crop_size[1], :]
assert tuple(np.shape(patch)[:2]) == tuple(crop_size)
if return_crop_loc:
return patch, (hSt, wSt)
return patch
def process_im(im):
""" Normalizes images into the range [-1.0, 1.0] """
im = np.array(im)
if np.max(im) <= 1:
# PNG format
im = (2.0 * im) - 1.0
else:
# JPEG format
im = 2.0 * (im / 255.) - 1.0
return im
def deprocess_im(im, dtype=None):
""" Map images in [-1.0, 1.0] back to [0, 255] """
im = np.array(im)
return ((255.0 * (im + 1.0))/2.0).astype(dtype)
def random_resize(im_a, im_b, same):
valid_interps = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4, cv2.INTER_AREA]
def get_param():
hr, wr = np.random.choice(np.linspace(0.5, 1.5, 11), 2)
#hr, wr = np.random.uniform(low=0.5, high=1.5, size=2)
interp = np.random.choice(valid_interps)
return [hr, wr, interp]
if same:
if np.random.randint(2):
a_par = get_param()
im_a = cv2.resize(im_a, None, fx=a_par[0], fy=a_par[1], interpolation=a_par[2])
im_b = cv2.resize(im_b, None, fx=a_par[0], fy=a_par[1], interpolation=a_par[2])
else:
a_par = get_param()
im_a = cv2.resize(im_a, None, fx=a_par[0], fy=a_par[1], interpolation=a_par[2])
if np.random.randint(2):
b_par = get_param()
while np.all(a_par == b_par):
b_par = get_param()
im_b = cv2.resize(im_b, None, fx=b_par[0], fy=b_par[1], interpolation=b_par[2])
return im_a, im_b
def random_jpeg(im_a, im_b, same):
def get_param():
#jpeg_quality_a = np.random.randint(50, 100) # doesnt include 100
return np.random.choice(np.linspace(50, 100, 11))
if same:
if np.random.randint(2):
a_par = get_param()
_, enc_a = cv2.imencode('.jpg', im_a, [int(cv2.IMWRITE_JPEG_QUALITY), a_par])
im_a = cv2.imdecode(enc_a, 1)
_, enc_b = cv2.imencode('.jpg', im_b, [int(cv2.IMWRITE_JPEG_QUALITY), a_par])
im_b = cv2.imdecode(enc_b, 1)
else:
a_par = get_param()
_, enc_a = cv2.imencode('.jpg', im_a, [int(cv2.IMWRITE_JPEG_QUALITY), a_par])
im_a = cv2.imdecode(enc_a, 1)
if np.random.randint(2):
b_par = get_param()
while np.all(a_par == b_par):
b_par = get_param()
_, enc_b = cv2.imencode('.jpg', im_b, [int(cv2.IMWRITE_JPEG_QUALITY), b_par])
im_b = cv2.imdecode(enc_b, 1)
return im_a, im_b
def gaussian_blur(im, kSz=None, sigma=1.0):
    # Gaussian blur; if kSz is not given, the kernel size is derived from sigma (odd, at least 3)
if kSz is None:
kSz = np.ceil(3.0 * sigma)
kSz = kSz + 1 if kSz % 2 == 0 else kSz
kSz = max(kSz, 3) # minimum kernel size
kSz = int(kSz)
blur = cv2.GaussianBlur(im,(kSz,kSz), sigma)
return blur
def random_blur(im_a, im_b, same):
# only square gaussian kernels
def get_param():
kSz = (2 * np.random.randint(1, 8)) + 1 # [3, 15]
sigma = np.random.choice(np.linspace(1.0, 5.0, 9))
#sigma = np.random.uniform(low=1.0, high=5.0, size=None) # 3 * sigma = kSz
return [kSz, sigma]
if same:
if np.random.randint(2):
a_par = get_param()
im_a = cv2.GaussianBlur(im_a, (a_par[0], a_par[0]), a_par[1])
im_b = cv2.GaussianBlur(im_b, (a_par[0], a_par[0]), a_par[1])
else:
a_par = get_param()
im_a = cv2.GaussianBlur(im_a, (a_par[0], a_par[0]), a_par[1])
if np.random.randint(2):
b_par = get_param()
while np.all(a_par == b_par):
b_par = get_param()
im_b = cv2.GaussianBlur(im_b, (b_par[0], b_par[0]), b_par[1])
return im_a, im_b
def random_noise(im):
noise = np.random.randn(*np.shape(im)) * 10.0
return np.array(np.clip(noise + im, 0, 255.0), dtype=np.uint8)
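# Illustrative usage of the helpers above (URL and sizes are hypothetical):
# im = get('https://example.com/photo.jpg', save_as='photo.png')
# patch = random_crop(im, (128, 128))
# x = process_im(patch) # values now in [-1.0, 1.0]
# restored = deprocess_im(x, dtype=np.uint8)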
| minyoungg/selfconsistency | lib/utils/util.py | Python | apache-2.0 | 5,391 |
'''
@author: Dallas Fraser
@date: 2015-08-25
@organization: MLSB API
@summary: The basic league API
'''
from flask_restful import Resource, reqparse
from flask import Response, request
from json import dumps
from api import DB
from api.model import League
from api.authentication import requires_admin
from api.errors import LeagueDoesNotExist
from api.variables import PAGE_SIZE
from api.routes import Routes
from api.helper import pagination_response
from api.cached_items import handle_table_change
from api.tables import Tables
parser = reqparse.RequestParser()
parser.add_argument('league_name', type=str)
post_parser = reqparse.RequestParser()
post_parser.add_argument('league_name', type=str, required=True)
class LeagueAPI(Resource):
def get(self, league_id):
"""
GET request for League Object matching given league_id
Route: Route['league']/<league_id: int>
Returns:
if found
status: 200
mimetype: application/json
data: {league_id:int, league_name:string}
otherwise
status: 404
mimetype: application/json
data: None
"""
# expose a single League
entry = League.query.get(league_id)
if entry is None:
raise LeagueDoesNotExist(payload={'details': league_id})
response = Response(dumps(entry.json()), status=200,
mimetype="application/json")
return response
@requires_admin
def delete(self, league_id):
"""
DELETE request for League
Route: Route['league']/<league_id: int>
Returns:
if found
status: 200
mimetype: application/json
data: None
otherwise
status: 404
mimetype: application/json
data: None
"""
        # delete a single league
league = League.query.get(league_id)
if league is None:
raise LeagueDoesNotExist(payload={'details': league_id})
DB.session.delete(league)
DB.session.commit()
response = Response(dumps(None), status=200,
mimetype="application/json")
handle_table_change(Tables.LEAGUE, item=league.json())
return response
@requires_admin
def put(self, league_id):
"""
PUT request for league
Route: Route['league']/<league_id: int>
Parameters :
league_name: The league's name (string)
Returns:
if found and successful
status: 200
mimetype: application/json
data: None
if found but not successful
status: IFSC
mimetype: application/json
data: None
otherwise
status: 404
mimetype: application/json
data: None
"""
        # update a single league
args = parser.parse_args()
league = League.query.get(league_id)
league_name = None
if league is None:
raise LeagueDoesNotExist(payload={'details': league_id})
if args['league_name']:
league_name = args['league_name']
league.update(league_name)
DB.session.commit()
response = Response(dumps(None), status=200,
mimetype="application/json")
handle_table_change(Tables.LEAGUE, item=league.json())
return response
def option(self):
return {'Allow': 'PUT'}, 200, \
{'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'PUT,GET'}
class LeagueListAPI(Resource):
def get(self):
"""
GET request for League List
Route: Route['league']
Parameters :
league_name: The league's name (string)
Returns:
status: 200
mimetype: application/json
data:
tournaments: [{league_id:int,
league_name:string,
},{...}
]
"""
# return a pagination of leagues
page = request.args.get('page', 1, type=int)
pagination = League.query.paginate(page, PAGE_SIZE, False)
result = pagination_response(pagination, Routes['league'])
resp = Response(dumps(result), status=200,
mimetype="application/json")
return resp
@requires_admin
def post(self):
"""
POST request for League List
Route: Route['league']
Parameters :
league_name: The league's name (string)
Returns:
if successful
status: 200
mimetype: application/json
data: the created user league id (int)
if missing required parameter
status: 400
mimetype: application/json
data: the created user league id (int)
if invalid parameter
status: IFSC
mimetype: application/json
data: the created user league id (int)
"""
        # create a new league
args = post_parser.parse_args()
league_name = None
if args['league_name']:
league_name = args['league_name']
league = League(league_name)
DB.session.add(league)
DB.session.commit()
result = league.id
handle_table_change(Tables.LEAGUE, item=league.json())
return Response(dumps(result), status=201,
mimetype="application/json")
def option(self):
return {'Allow': 'PUT'}, 200, \
{'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'PUT,GET'}
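# Illustrative request flow (the league name is hypothetical): POSTing
# {"league_name": "Monday Night"} to Routes['league'] creates the league and
# returns its integer id with status 201; a GET on Routes['league'] then pages
# through existing leagues PAGE_SIZE at a time.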
| fras2560/mlsb-platform | api/basic/league.py | Python | apache-2.0 | 6,180 |
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Norman Walsh 05/08/2015 Initial development
"""
Classes for dealing with path namespaces
"""
class PathNamespace:
"""
A database path namespace.
"""
def __init__(self, prefix, namespace_uri):
"""
Create a path namespace.
:param prefix: The prefix to use in field (i.e. 'foo')
:param namespace: The namespace uri (i.e. 'http://bar.com')
"""
self._config = {
'prefix': prefix,
'namespace-uri': namespace_uri
}
def prefix(self):
"""
The prefix.
"""
return self._config['prefix']
def set_prefix(self, prefix):
"""
Set the prefix.
"""
self._config['prefix'] = prefix
return self
def namespace_uri(self):
"""
The namespace URI.
"""
return self._config['namespace-uri']
def set_namespace_uri(self, namespace_uri):
"""
Set the namespace URI.
"""
self._config['namespace-uri'] = namespace_uri
return self
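# Example usage (prefix/URI values are illustrative, taken from the docstring
# above where possible):
# ns = PathNamespace("foo", "http://bar.com")
# ns.set_prefix("ex") # setters return self, so calls can be chained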
| supriyantomaftuh/python_api | python_api/marklogic/models/database/path.py | Python | apache-2.0 | 1,718 |
# -*- coding: utf-8 -*-
from brutal.core.plugin import cmd
from datetime import datetime
import hladnymatfyzak
DATE_FORMAT = '%d.%m.%Y'
def validate_date_and_args(args):
"""Validates given date"""
if len(args) >= 1:
try:
valid = datetime.strptime(args[0], DATE_FORMAT)
except ValueError:
return None
return datetime(day=valid.day, month=valid.month, year=valid.year)
return datetime.today()
def output_meals(meals):
"""Returns string of meals"""
out = ''
for i, meal in enumerate(meals, start=1):
out += '{0}. {1}€ '.format(i, meal)
out += '\n' if i % 3 == 0 else ''
return out
@cmd
def horna(event):
"""Meals available in horna
Examples:
!horna 24.12.2015
"""
date_obj = validate_date_and_args(event.args)
if date_obj is None:
return 'Invalid date'
meals = hladnymatfyzak.horna(day=date_obj.day, month=date_obj.month,
year=date_obj.year)
return output_meals(meals)
@cmd
def dolna(event):
"""Meals available in dolna
Examples:
!dolna 24.12.2015
"""
date_obj = validate_date_and_args(event.args)
if date_obj is None:
return 'Invalid date'
meals = hladnymatfyzak.dolna(day=date_obj.day, month=date_obj.month,
year=date_obj.year)
return output_meals(meals)
@cmd
def faynfood(event):
"""Meals available in Faynfood
Examples:
!faynfood 24.12.2015
"""
date_obj = validate_date_and_args(event.args)
if date_obj is None:
return 'Invalid date'
meals = hladnymatfyzak.ffood('faynfood', day=date_obj.day,
month=date_obj.month, year=date_obj.year)
return output_meals(meals)
@cmd
def freefood(event):
"""Meals available in Freefood
Examples:
!freefood 24.12.2015
"""
date_obj = validate_date_and_args(event.args)
if date_obj is None:
return 'Invalid date'
meals = hladnymatfyzak.ffood('freefood', day=date_obj.day,
month=date_obj.month, year=date_obj.year)
return output_meals(meals)
| mrshu/brutal-plugins | hladny_matfyzak.py | Python | apache-2.0 | 2,198 |
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Commands related to routers.
"""
import argparse
import subprocess
import sys
from akanda.rug import commands
from akanda.rug.cli import message
from akanda.rug.api import nova, quantum
from novaclient import exceptions
from oslo.config import cfg
from neutronclient.v2_0 import client
class _TenantRouterCmd(message.MessageSending):
def get_parser(self, prog_name):
# Bypass the direct base class to let us put the tenant id
# argument first
p = super(_TenantRouterCmd, self).get_parser(prog_name)
p.add_argument(
'router_id',
)
return p
def make_message(self, parsed_args):
router_id = parsed_args.router_id.lower()
if router_id == 'error':
tenant_id = 'error'
elif router_id == '*':
tenant_id = '*'
else:
# Look up the tenant for a given router so we can send the
# command using both and the rug can route it to the correct
# worker. We do the lookup here instead of in the rug to avoid
# having to teach the rug notification and dispatching code
# about how to find the owner of a router, and to shift the
# burden of the neutron API call to the client so the server
# doesn't block. It also gives us a chance to report an error
# when we can't find the router.
n_c = client.Client(
username=self.app.rug_ini.admin_user,
password=self.app.rug_ini.admin_password,
tenant_name=self.app.rug_ini.admin_tenant_name,
auth_url=self.app.rug_ini.auth_url,
auth_strategy=self.app.rug_ini.auth_strategy,
region_name=self.app.rug_ini.auth_region,
)
response = n_c.list_routers(retrieve_all=True, id=router_id)
try:
router_details = response['routers'][0]
except (KeyError, IndexError):
raise ValueError('No router with id %r found: %s' %
(router_id, response))
assert router_details['id'] == router_id
tenant_id = router_details['tenant_id']
self.log.info(
'sending %s instruction for tenant %r, router %r',
self._COMMAND,
tenant_id,
router_id,
)
return {
'command': self._COMMAND,
'router_id': router_id,
'tenant_id': tenant_id,
}
class RouterUpdate(_TenantRouterCmd):
"""force-update a router"""
_COMMAND = commands.ROUTER_UPDATE
class RouterRebuild(_TenantRouterCmd):
"""force-rebuild a router"""
_COMMAND = commands.ROUTER_REBUILD
def get_parser(self, prog_name):
p = super(RouterRebuild, self).get_parser(prog_name)
p.add_argument(
'--router_image_uuid',
)
return p
def take_action(self, parsed_args):
uuid = parsed_args.router_image_uuid
if uuid:
nova_client = nova.Nova(cfg.CONF).client
try:
nova_client.images.get(uuid)
except exceptions.NotFound:
self.log.exception(
'could not retrieve custom image %s from Glance:' % uuid
)
raise
return super(RouterRebuild, self).take_action(parsed_args)
def make_message(self, parsed_args):
message = super(RouterRebuild, self).make_message(parsed_args)
message['router_image_uuid'] = parsed_args.router_image_uuid
return message
class RouterDebug(_TenantRouterCmd):
"""debug a single router"""
_COMMAND = commands.ROUTER_DEBUG
class RouterManage(_TenantRouterCmd):
"""manage a single router"""
_COMMAND = commands.ROUTER_MANAGE
class RouterSSH(_TenantRouterCmd):
"""ssh into a router over the management network"""
def get_parser(self, prog_name):
p = super(RouterSSH, self).get_parser(prog_name)
p.add_argument('remainder', nargs=argparse.REMAINDER)
return p
def take_action(self, parsed_args):
n_c = client.Client(
username=self.app.rug_ini.admin_user,
password=self.app.rug_ini.admin_password,
tenant_name=self.app.rug_ini.admin_tenant_name,
auth_url=self.app.rug_ini.auth_url,
auth_strategy=self.app.rug_ini.auth_strategy,
region_name=self.app.rug_ini.auth_region,
)
router_id = parsed_args.router_id.lower()
ports = n_c.show_router(router_id).get('router', {}).get('ports', {})
for port in ports:
if port['fixed_ips'] and \
port['device_owner'] == quantum.DEVICE_OWNER_ROUTER_MGT:
v6_addr = port['fixed_ips'].pop()['ip_address']
try:
cmd = ["ssh", "root@%s" % v6_addr] + parsed_args.remainder
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
| markmcclain/astara | akanda/rug/cli/router.py | Python | apache-2.0 | 5,687 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from heat.common import exception
from heat.common import short_id
from heat.common import template_format
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import node_data
from heat.engine.resources.aws.iam import user
from heat.engine.resources.openstack.heat import access_policy as ap
from heat.engine import scheduler
from heat.engine import stk_defn
from heat.objects import resource_data as resource_data_object
from heat.tests import common
from heat.tests import utils
user_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User"
}
}
}
'''
user_template_password = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User",
"Properties": {
"LoginProfile": { "Password": "myP@ssW0rd" }
}
}
}
}
'''
user_accesskey_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User"
},
"HostKeys" : {
"Type" : "AWS::IAM::AccessKey",
"Properties" : {
"UserName" : {"Ref": "CfnUser"}
}
}
}
}
'''
user_policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a User",
"Parameters" : {},
"Resources" : {
"CfnUser" : {
"Type" : "AWS::IAM::User",
"Properties" : {
"Policies" : [ { "Ref": "WebServerAccessPolicy"} ]
}
},
"WebServerAccessPolicy" : {
"Type" : "OS::Heat::AccessPolicy",
"Properties" : {
"AllowedResources" : [ "WikiDatabase" ]
}
},
"WikiDatabase" : {
"Type" : "AWS::EC2::Instance",
}
}
}
'''
class UserTest(common.HeatTestCase):
def setUp(self):
super(UserTest, self).setUp()
self.stack_name = 'test_user_stack_%s' % utils.random_name()
self.username = '%s-CfnUser-aabbcc' % self.stack_name
self.fc = fake_ks.FakeKeystoneClient(username=self.username)
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
def create_user(self, t, stack, resource_name,
project_id, user_id='dummy_user',
password=None):
self.patchobject(user.User, 'keystone', return_value=self.fc)
self.mock_create_project = self.patchobject(
fake_ks.FakeKeystoneClient, 'create_stack_domain_project',
return_value=project_id)
resource_defns = stack.t.resource_definitions(stack)
rsrc = user.User(resource_name,
resource_defns[resource_name],
stack)
rsrc.store()
self.patchobject(short_id, 'get_id', return_value='aabbcc')
self.mock_create_user = self.patchobject(
fake_ks.FakeKeystoneClient, 'create_stack_domain_user',
return_value=user_id)
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def test_user(self):
t = template_format.parse(user_template)
stack = utils.parse_stack(t, stack_name=self.stack_name)
project_id = 'stackproject'
rsrc = self.create_user(t, stack, 'CfnUser', project_id)
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'Foo')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertIsNone(rsrc.handle_suspend())
self.assertIsNone(rsrc.handle_resume())
rsrc.resource_id = None
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
rsrc.resource_id = self.fc.access
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
rsrc.state_set(rsrc.CREATE, rsrc.COMPLETE)
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.mock_create_project.assert_called_once_with(stack.id)
self.mock_create_user.assert_called_once_with(
password=None, project_id=project_id,
username=self.username)
def test_user_password(self):
t = template_format.parse(user_template_password)
stack = utils.parse_stack(t, stack_name=self.stack_name)
project_id = 'stackproject'
password = u'myP@ssW0rd'
rsrc = self.create_user(t, stack, 'CfnUser',
project_id=project_id,
password=password)
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.mock_create_project.assert_called_once_with(stack.id)
self.mock_create_user.assert_called_once_with(
password=password, project_id=project_id,
username=self.username)
def test_user_validate_policies(self):
t = template_format.parse(user_policy_template)
stack = utils.parse_stack(t, stack_name=self.stack_name)
project_id = 'stackproject'
rsrc = self.create_user(t, stack, 'CfnUser', project_id)
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual([u'WebServerAccessPolicy'],
rsrc.properties['Policies'])
# OK
self.assertTrue(rsrc._validate_policies([u'WebServerAccessPolicy']))
# Resource name doesn't exist in the stack
self.assertFalse(rsrc._validate_policies([u'NoExistAccessPolicy']))
# Resource name is wrong Resource type
self.assertFalse(rsrc._validate_policies([u'NoExistAccessPolicy',
u'WikiDatabase']))
# Wrong type (AWS embedded policy format, not yet supported)
dict_policy = {"PolicyName": "AccessForCFNInit",
"PolicyDocument":
{"Statement": [{"Effect": "Allow",
"Action":
"cloudformation:DescribeStackResource",
"Resource": "*"}]}}
# However we should just ignore it to avoid breaking existing templates
self.assertTrue(rsrc._validate_policies([dict_policy]))
self.mock_create_project.assert_called_once_with(stack.id)
self.mock_create_user.assert_called_once_with(
password=None, project_id=project_id,
username=self.username)
def test_user_create_bad_policies(self):
t = template_format.parse(user_policy_template)
t['Resources']['CfnUser']['Properties']['Policies'] = ['NoExistBad']
stack = utils.parse_stack(t, stack_name=self.stack_name)
resource_name = 'CfnUser'
resource_defns = stack.t.resource_definitions(stack)
rsrc = user.User(resource_name,
resource_defns[resource_name],
stack)
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.handle_create)
def test_user_access_allowed(self):
def mock_access_allowed(resource):
return True if resource == 'a_resource' else False
self.patchobject(ap.AccessPolicy, 'access_allowed',
side_effect=mock_access_allowed)
t = template_format.parse(user_policy_template)
stack = utils.parse_stack(t, stack_name=self.stack_name)
project_id = 'stackproject'
rsrc = self.create_user(t, stack, 'CfnUser', project_id)
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertTrue(rsrc.access_allowed('a_resource'))
self.assertFalse(rsrc.access_allowed('b_resource'))
self.mock_create_project.assert_called_once_with(stack.id)
self.mock_create_user.assert_called_once_with(
password=None, project_id=project_id,
username=self.username)
def test_user_access_allowed_ignorepolicy(self):
def mock_access_allowed(resource):
return True if resource == 'a_resource' else False
self.patchobject(ap.AccessPolicy, 'access_allowed',
side_effect=mock_access_allowed)
t = template_format.parse(user_policy_template)
t['Resources']['CfnUser']['Properties']['Policies'] = [
'WebServerAccessPolicy', {'an_ignored': 'policy'}]
stack = utils.parse_stack(t, stack_name=self.stack_name)
project_id = 'stackproject'
rsrc = self.create_user(t, stack, 'CfnUser', project_id)
self.assertEqual('dummy_user', rsrc.resource_id)
self.assertEqual(self.username, rsrc.FnGetRefId())
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertTrue(rsrc.access_allowed('a_resource'))
self.assertFalse(rsrc.access_allowed('b_resource'))
self.mock_create_project.assert_called_once_with(stack.id)
self.mock_create_user.assert_called_once_with(
password=None, project_id=project_id,
username=self.username)
def test_user_refid_rsrc_id(self):
t = template_format.parse(user_template)
stack = utils.parse_stack(t)
rsrc = stack['CfnUser']
rsrc.resource_id = 'phy-rsrc-id'
self.assertEqual('phy-rsrc-id', rsrc.FnGetRefId())
def test_user_refid_convg_cache_data(self):
t = template_format.parse(user_template)
cache_data = {'CfnUser': node_data.NodeData.from_dict({
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'reference_id': 'convg_xyz'
})}
stack = utils.parse_stack(t, cache_data=cache_data)
rsrc = stack.defn['CfnUser']
self.assertEqual('convg_xyz', rsrc.FnGetRefId())
class AccessKeyTest(common.HeatTestCase):
def setUp(self):
super(AccessKeyTest, self).setUp()
self.username = utils.PhysName('test_stack', 'CfnUser')
self.credential_id = 'acredential123'
self.fc = fake_ks.FakeKeystoneClient(username=self.username,
user_id='dummy_user',
credential_id=self.credential_id)
cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
def create_user(self, t, stack, resource_name,
project_id='stackproject', user_id='dummy_user',
password=None):
self.patchobject(user.User, 'keystone', return_value=self.fc)
rsrc = stack[resource_name]
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
stk_defn.update_resource_data(stack.defn, resource_name,
rsrc.node_data())
return rsrc
def create_access_key(self, t, stack, resource_name):
rsrc = stack[resource_name]
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
def test_access_key(self):
t = template_format.parse(user_accesskey_template)
stack = utils.parse_stack(t)
self.create_user(t, stack, 'CfnUser')
rsrc = self.create_access_key(t, stack, 'HostKeys')
self.assertEqual(self.fc.access,
rsrc.resource_id)
self.assertEqual(self.fc.secret,
rsrc._secret)
# Ensure the resource data has been stored correctly
rs_data = resource_data_object.ResourceData.get_all(rsrc)
self.assertEqual(self.fc.secret, rs_data.get('secret_key'))
self.assertEqual(self.fc.credential_id, rs_data.get('credential_id'))
self.assertEqual(2, len(rs_data.keys()))
self.assertEqual(utils.PhysName(stack.name, 'CfnUser'),
rsrc.FnGetAtt('UserName'))
rsrc._secret = None
self.assertEqual(self.fc.secret,
rsrc.FnGetAtt('SecretAccessKey'))
self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'Foo')
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
def test_access_key_get_from_keystone(self):
self.patchobject(user.AccessKey, 'keystone', return_value=self.fc)
t = template_format.parse(user_accesskey_template)
stack = utils.parse_stack(t)
self.create_user(t, stack, 'CfnUser')
rsrc = self.create_access_key(t, stack, 'HostKeys')
# Delete the resource data for secret_key, to test that existing
# stacks which don't have the resource_data stored will continue
# working via retrieving the keypair from keystone
resource_data_object.ResourceData.delete(rsrc, 'credential_id')
resource_data_object.ResourceData.delete(rsrc, 'secret_key')
self.assertRaises(exception.NotFound,
resource_data_object.ResourceData.get_all,
rsrc)
rsrc._secret = None
rsrc._data = None
self.assertEqual(self.fc.secret,
rsrc.FnGetAtt('SecretAccessKey'))
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
def test_access_key_no_user(self):
t = template_format.parse(user_accesskey_template)
# Set the resource properties UserName to an unknown user
t['Resources']['HostKeys']['Properties']['UserName'] = 'NonExistent'
stack = utils.parse_stack(t)
stack['CfnUser'].resource_id = self.fc.user_id
resource_defns = stack.t.resource_definitions(stack)
rsrc = user.AccessKey('HostKeys',
resource_defns['HostKeys'],
stack)
create = scheduler.TaskRunner(rsrc.create)
self.assertRaises(exception.ResourceFailure, create)
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
class AccessPolicyTest(common.HeatTestCase):
def test_accesspolicy_create_ok(self):
t = template_format.parse(user_policy_template)
stack = utils.parse_stack(t)
resource_name = 'WebServerAccessPolicy'
resource_defns = stack.t.resource_definitions(stack)
rsrc = ap.AccessPolicy(resource_name,
resource_defns[resource_name],
stack)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
def test_accesspolicy_create_ok_empty(self):
t = template_format.parse(user_policy_template)
resource_name = 'WebServerAccessPolicy'
t['Resources'][resource_name]['Properties']['AllowedResources'] = []
stack = utils.parse_stack(t)
resource_defns = stack.t.resource_definitions(stack)
rsrc = ap.AccessPolicy(resource_name,
resource_defns[resource_name],
stack)
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
def test_accesspolicy_create_err_notfound(self):
t = template_format.parse(user_policy_template)
resource_name = 'WebServerAccessPolicy'
t['Resources'][resource_name]['Properties']['AllowedResources'] = [
'NoExistResource']
stack = utils.parse_stack(t)
self.assertRaises(exception.StackValidationFailed, stack.validate)
def test_accesspolicy_access_allowed(self):
t = template_format.parse(user_policy_template)
resource_name = 'WebServerAccessPolicy'
stack = utils.parse_stack(t)
resource_defns = stack.t.resource_definitions(stack)
rsrc = ap.AccessPolicy(resource_name,
resource_defns[resource_name],
stack)
self.assertTrue(rsrc.access_allowed('WikiDatabase'))
self.assertFalse(rsrc.access_allowed('NotWikiDatabase'))
self.assertFalse(rsrc.access_allowed(None))
| noironetworks/heat | heat/tests/aws/test_user.py | Python | apache-2.0 | 17,881 |
from collections import defaultdict
from copy import deepcopy
from geopy.geocoders import Nominatim
import Util as util
import twitter
import json
import time
import string
import stop_words
geolocator = Nominatim()
STOP_WORDS = stop_words.get_stop_words('english')
api = twitter.Api(consumer_key='b170h2arKC4VoITriN5jIjFRN',
consumer_secret='z2npapLunYynvp9E783KsTiTMUR4CE6jgGIFqXOdzmXNkYI7g9',
access_token_key='3842613073-L7vq82QRYRGCbO1kzN9bYfjfbbV7kOpWWLYnBGG',
access_token_secret='FU6AJWG4iDHfzQWhjKB1r3SIwoyzTcgFe0LjyNfq8r6aR')
cached_query_results = {}
cached_user_results = {}
def search_tweets(query, max_searches=5, override_cache=False):
"""Searches for tweets that match query.
Args:
query: The search query string. Can be a phrase or hashtag.
See https://dev.twitter.com/rest/reference/get/search/tweets
max_searches: The maximum number of API searches that will be
executed for the given query. Default value is 5 searches.
100 tweets can be obtained per API search, so by default
a maximum of 500 tweets will be returned.
override_cache: Whether to execute a search even if there is
already a cached result for the query. Defaults to False.
Returns:
A list of tweet objects matching the query with most recent
tweets first.
Raises:
UserWarning: If override_cache is set to False and result for
input query has already been cached.
"""
    if query in cached_query_results and not override_cache:
raise UserWarning('input query {0} is already in '
'cached_query_results'.format(query))
remaining_timeout = api.GetSleepTime('/search/tweets')
if remaining_timeout != 0:
print ('searchTweets() must wait {0} seconds in order to not exceed '
'the Twitter API rate limit.'.format(remaining_timeout + 1))
time.sleep(remaining_timeout + 1)
result = []
search_result = api.GetSearch(term=query, count=100) # could also add lang='en'
result.extend(search_result)
oldest_tweet_id = min([t.GetId() for t in search_result])
num_searches = 1
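    # Page backwards through older tweets (100 per call) until a page comes back
    # short or max_searches is reached; max_id excludes ids already collected.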
while len(search_result) == 100 and num_searches < max_searches:
search_result = _search_tweets_aux(query, oldest_tweet_id)
oldest_tweet_id = min([t.GetId() for t in search_result])
result.extend(search_result)
num_searches += 1
global cached_query_results
cached_query_results[query] = result
return result
def _search_tweets_aux(query, max_tweet_id):
"""Auxiliary helper function for search_tweets."""
remaining_timeout = api.GetSleepTime('/search/tweets')
if remaining_timeout != 0:
print ('searchTweets() must wait {0} seconds in order to not exceed '
'the Twitter API rate limit.'.format(remaining_timeout + 1))
time.sleep(remaining_timeout + 1)
search_result = api.GetSearch(term=query, count=100, max_id=max_tweet_id - 1)
return search_result
def get_coordinate_list(tweets):
"""Gets list of (longitude, latitude) tuples for tweets in list.
Args:
tweets: List of tweet objects to extract geo coordinates from.
Will ignore tweets in list for which geo coordinates cannot
be extracted.
Returns:
List of (longitude, latitude) tuples for tweets in list.
"""
coord_list = []
for tweet in tweets:
coords = get_coordinates(tweet)
if coords:
coord_list.append(coords)
return coord_list
def get_coordinates(tweet):
"""Gets longitude and latitude of tweet.
Args:
tweet: The tweet object to extract geo coordinates from.
Returns:
Tuple of (longitude, latitude) for the input tweet. Returns
False if unable to extract geo coordinates for tweet.
"""
# try to get tweet geo coordinates directly if available
coordinates = tweet.GetCoordinates()
if coordinates:
return coordinates
    # otherwise parse geo coordinates from user location if available
location = tweet.user.location
if location:
coordinates = geolocator.geocode(location)
if coordinates:
return coordinates.longitude, coordinates.latitude
# not able to extract geo coordinates, so return False
return False
def no_duplicate_tweets(tweets):
"""Returns True iff tweets in input list are all unique."""
ids = set()
for tweet in tweets:
tweet_id = tweet.GetId()
if tweet_id in ids:
return False
ids.add(tweet_id)
return True
def tweets_to_text_strings(tweets):
"""Converts list of tweets to list of tweet text strings."""
return [tweet.GetText() for tweet in tweets]
def tweets_to_word_counter(tweets, normalize=False, lowercase=True):
"""Converts list of tweets to dict of word counts.
Args:
tweets: List of tweet objects to process.
normalize: Whether to return frequencies instead of counts.
Default value is False (return counts).
lowercase: Whether to convert all words to lowercase.
            Default value is True.
Returns:
util.Counter object containing counts of words in the tweets.
Words are keys, counts are values. If normalize is set to True,
then function will return word frequencies as values.
"""
word_counter = util.Counter()
for tweet in tweets:
        word_counter += string_to_nonstopword_counter(tweet.GetText(), lowercase)
if normalize:
word_counter.normalize()
return word_counter
def string_to_nonstopword_list(text):
"""Returns list of non-stopwords in string.
Args:
text: The string to process.
Returns:
List of non-stopwords in text string. Punctuation, whitespace,
and hyperlinks are removed. Hashtag and @USERNAME punctionation
is not removed.
"""
# split strings into words and remove whitespace:
words = text.split()
    # remove non-hashtag and non-username punctuation:
chars_to_remove = list(deepcopy(string.punctuation))
chars_to_remove.remove('#')
chars_to_remove.remove('@')
chars_to_remove = ''.join(chars_to_remove)
words = [word.strip(chars_to_remove) for word in words]
# remove empty strings:
words = [word for word in words if word]
# remove stopwords:
words = filter(lambda w: w.lower() not in STOP_WORDS, words)
# remove hyperlinks:
    words = filter(lambda w: not (len(w) > 7 and w[0:8] == 'https://'), words)
# remove non ascii characters:
to_return = []
for word in words:
valid = True
for char in word:
if char not in string.printable:
valid = False
break
if valid:
to_return.append(word)
return to_return
def string_to_nonstopword_counter(text, lowercase=True):
"""Converts string to util.Counter of non-stopwords in text string.
Args:
text: The string to process.
        lowercase: Whether to convert the words in the string to lowercase.
Returns:
util.Counter object containing counts of non-stopwords in string.
Punctuation, whitespace, and hyperlinks are removed. Hashtag
        and @USERNAME punctuation is not removed.
"""
words = string_to_nonstopword_list(text)
word_counter = util.Counter()
for word in words:
if lowercase:
word = word.lower()
word_counter[word] += 1
return word_counter
def get_user_tweets(username, max_searches=5, override_cache=False):
"""Searches for tweets that match query.
Args:
username: The username of the Twitter account that tweets will
be downloaded for.
max_searches: The maximum number of API searches that will be
executed for the given user. Default value is 5 searches.
200 tweets can be obtained per API search, so by default
a maximum of 1000 tweets will be returned.
override_cache: Whether to execute a search even if there is
already a cached result for the specifed Twitter user.
Defaults to False.
Returns:
        A list of tweet objects corresponding to the specified user's
public tweets, with their most recent tweets first.
"""
    if username in cached_user_results and not override_cache:
        raise UserWarning('input username {0} is already in '
                          'cached_user_results'.format(username))
remaining_timeout = api.GetSleepTime('/search/tweets') # might need to change this
if remaining_timeout != 0:
print ('searchTweets() must wait {0} seconds in order to not exceed '
'the Twitter API rate limit.'.format(remaining_timeout + 1))
time.sleep(remaining_timeout + 1)
result = []
search_result = api.GetUserTimeline(screen_name=username, count=200) # could also add lang='en'
result.extend(search_result)
oldest_tweet_id = min([t.GetId() for t in search_result])
num_searches = 1
while len(search_result) == 200 and num_searches < max_searches:
search_result = _get_user_tweets_aux(username, oldest_tweet_id)
if not search_result:
break
oldest_tweet_id = min([t.GetId() for t in search_result])
result.extend(search_result)
num_searches += 1
global cached_user_results
cached_user_results[username] = result
return result
def _get_user_tweets_aux(username, max_tweet_id):
"""Auxiliary helper function for search_tweets."""
remaining_timeout = api.GetSleepTime('/search/tweets')
if remaining_timeout != 0:
print ('searchTweets() must wait {0} seconds in order to not exceed '
'the Twitter API rate limit.'.format(remaining_timeout + 1))
time.sleep(remaining_timeout + 1)
search_result = api.GetUserTimeline(screen_name=username, count=200,
max_id=max_tweet_id - 1)
return search_result
def split_words_hashtags_usermentions(word_counter):
"""Splits all words into words, hashtags, and usermentions counters."""
pure_word_counter = util.Counter()
hashtag_counter = util.Counter()
usermentions_counter = util.Counter()
for word in word_counter:
if word[0] == '#':
hashtag_counter[word] = word_counter[word]
elif word[0] == '@':
usermentions_counter[word] = word_counter[word]
else:
pure_word_counter[word] = word_counter[word]
return pure_word_counter, hashtag_counter, usermentions_counter
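# Illustrative end-to-end usage (the query string is hypothetical):
# tweets = search_tweets('#python', max_searches=2)
# counts = tweets_to_word_counter(tweets, normalize=True)
# words, hashtags, mentions = split_words_hashtags_usermentions(counts)
# coords = get_coordinate_list(tweets)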
| ZacWilson047/TwitterProject | TwitterModule.py | Python | apache-2.0 | 10,730 |
from functools import wraps
def authorized_method(o):
o.authentication_required = o.slug
o.authorization_required = o.slug
return o
def authenticated_method(o):
o.authentication_required = o.slug
return o
def anonymous_method(o):
o.authentication_required = False
o.authorization_required = False
return o
class authorization_required(object):
"""
Class decorator for documents, collections, applications that require authorization to access.
Adds authentication_required and authorization_required attributes to the decorated class at a minimum. It is also
possible to specify a filter function that filters documents based on a user's authentication information and
each individual document. This is achieved via rethinkdb's filter API and must use rethinkdb predicates. This should
be a nested function::
def example_filter_function(auth_info, method):
username = auth_info.username
permission = 'can_' + method
            return lambda doc: \
                doc[permission].contains(username)
Args:
*protected (str): Items should be 'read', 'write', or the name of a method
filter_function (function): Should be a function that accepts a decoded auth token and an access method, then
returns another function. The second function should accept a document instance and return True or False
whether the user has access to that document.
"""
def __init__(self, *protected, filter_function=None):
self.protected = protected
self.filter_function = filter_function
def __call__(self, cls):
cls.authentication_required = self.protected
cls.authorization_required = self.protected
if self.filter_function:
cls.document_level_authorization = True
cls.authorization_filter = self.filter_function
else:
cls.document_level_authorization = False
return cls
class authentication_required(object):
def __init__(self, *protected):
self.protected = protected
def __call__(self, cls):
cls.authentication_required = self.protected
return cls
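# Illustrative usage (the decorated class and filter function are hypothetical):
# @authorization_required('read', 'write', filter_function=example_filter_function)
# class Article(Document):
#     ...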
| JeffHeard/sondra | sondra/auth/decorators.py | Python | apache-2.0 | 2,213 |
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from pyramid import testing
from paasta_tools.api.views import pause_autoscaler
def test_get_service_autoscaler_pause():
with mock.patch(
'paasta_tools.utils.KazooClient',
autospec=True,
) as mock_zk, mock.patch(
'paasta_tools.utils.load_system_paasta_config',
autospec=True,
):
request = testing.DummyRequest()
mock_zk_get = mock.Mock(return_value=(b'100', None))
mock_zk.return_value = mock.Mock(get=mock_zk_get)
response = pause_autoscaler.get_service_autoscaler_pause(request)
mock_zk_get.assert_called_once_with('/autoscaling/paused')
assert response == '100'
def test_update_autoscaler_pause():
with mock.patch(
'paasta_tools.utils.KazooClient',
autospec=True,
) as mock_zk, mock.patch(
'paasta_tools.api.views.pause_autoscaler.time',
autospec=True,
) as mock_time, mock.patch(
'paasta_tools.utils.load_system_paasta_config',
autospec=True,
):
request = testing.DummyRequest()
request.swagger_data = {
'json_body': {'minutes': 100},
}
mock_zk_set = mock.Mock()
mock_zk_ensure = mock.Mock()
mock_zk.return_value = mock.Mock(set=mock_zk_set, ensure_path=mock_zk_ensure)
mock_time.time = mock.Mock(return_value=0)
response = pause_autoscaler.update_service_autoscaler_pause(request)
mock_zk_ensure.assert_called_once_with('/autoscaling/paused')
mock_zk_set.assert_called_once_with('/autoscaling/paused', b'6000')
assert response is None
def test_delete_autoscaler_pause():
with mock.patch(
'paasta_tools.utils.KazooClient',
autospec=True,
) as mock_zk, mock.patch(
'paasta_tools.api.views.pause_autoscaler.time',
autospec=True,
) as mock_time, mock.patch(
'paasta_tools.utils.load_system_paasta_config',
autospec=True,
):
request = testing.DummyRequest()
mock_zk_del = mock.Mock()
mock_zk_ensure = mock.Mock()
mock_zk.return_value = mock.Mock(delete=mock_zk_del, ensure_path=mock_zk_ensure)
mock_time.time = mock.Mock(return_value=0)
response = pause_autoscaler.delete_service_autoscaler_pause(request)
mock_zk_ensure.assert_called_once_with('/autoscaling/paused')
mock_zk_del.assert_called_once_with('/autoscaling/paused')
assert response is None
| somic/paasta | tests/api/test_pause_autoscaler.py | Python | apache-2.0 | 3,045 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setup import tc, rm, get_sandbox_path
import logging
logger = logging.getLogger(__name__)
def test_svm(tc):
logger.info("define schema")
schema = [("data", float),("label", str)]
logger.info("creating the frame")
data = [[-48,1],
[-75,1],
[-63,1],
[-57,1],
[73,0],
[-33,1],
[100,0],
[-54,1],
[78,0],
[48,0],
[-55,1],
[23,0],
[45,0],
[75,0],
[95,0],
[73,0],
[7,0],
[39,0],
[-60,1]]
f = tc.frame.create(data, schema=schema)
logger.info(f.inspect())
logger.info("training the model on the frame")
model = tc.models.classification.svm.train(f, ['data'], 'label')
logger.info("predicting the class using the model and the frame")
predicted_frame = model.predict(f)
assert(set(predicted_frame.column_names) == set(['data', 'label', 'predicted_label']))
assert(len(predicted_frame.column_names) == 3)
assert(len(f.column_names) == 2)
metrics = model.test(predicted_frame)
assert(metrics.accuracy == 1.0)
assert(metrics.f_measure == 1.0)
assert(metrics.precision == 1.0)
assert(metrics.recall == 1.0)
| skavulya/spark-tk | integration-tests/tests/test_svm.py | Python | apache-2.0 | 1,997 |
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from .utils import disable_for_loaddata
from . import settings
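# Create a DRF auth token for every newly created user. The
# disable_for_loaddata decorator (imported from the sibling utils module,
# presumably skipping the handler during fixture loading) keeps dumped users
# from ending up with duplicate tokens.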
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
@disable_for_loaddata
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
| cdelguercio/slothauth | slothauth/signals.py | Python | apache-2.0 | 409 |
from common import TestID, load_common_properties, get_proxy_file_path
from eu.emi.security.authn.x509.impl import PEMCredential
from exceptions import Exception
from jarray import array
from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from org.italiangrid.dav.client import WebDAVClient, WebDAVClientFactory
import mkcol, http_put, http_get, move, delete
import random
import string
import time
import traceback
import uuid
import os
## This loads the base properties into the grinder properties.
## It should be left at the top of the script execution.
load_common_properties()
error = grinder.logger.error
info = grinder.logger.info
debug = grinder.logger.debug
props = grinder.properties
# Proxy authorized to write on SRM/WEBDAV endpoints
PROXY_FILE = get_proxy_file_path()
# Test specific variables
TEST_DIRECTORY = "getdav"
TEST_STORAGEAREA = props['getdav.storagearea']
TEST_ENDPOINT = props['getdav.endpoint']
TEST_NUMFILES = int(props['getdav.numfiles'])
TEST_FILESIZE = int(props['getdav.filesize'])
# Computed variables
TEST_DIRECTORY_URL = "%s/webdav/%s/%s" % (TEST_ENDPOINT, TEST_STORAGEAREA, TEST_DIRECTORY)
# HTTP Client
DAV_CLIENT = WebDAVClientFactory.newWebDAVClient(TEST_ENDPOINT,PROXY_FILE)
FILE_URLS = []
def getURLFile(filename):
return "%s/%s" % (TEST_DIRECTORY_URL, filename)
def check_http_success(statusCode, expected_code, error_msg):
if (statusCode != expected_code):
msg = "%s. Status code is %s instead of %s" % (error_msg, statusCode, expected_code)
raise Exception(msg)
def create_test_directory_if_needed():
http_get_runner=http_get.TestRunner()
statusCode = http_get_runner(TEST_DIRECTORY_URL,DAV_CLIENT)
if (statusCode != 200):
DAV_CLIENT.mkcol(TEST_DIRECTORY_URL)
def create_local_file_to_upload():
local_file_path = "/tmp/%s" % str(uuid.uuid4());
info("Creating local file %s" % local_file_path)
file = open(local_file_path, "w")
file.seek(TEST_FILESIZE*1024-1)
file.write("\0")
file.close()
size = os.stat(local_file_path).st_size
info("Local file size is %i bytes" % size)
return local_file_path
def upload_file(local_file_path, destination_URL):
http_put_runner=http_put.TestRunner()
statusCode = http_put_runner(destination_URL,local_file_path,DAV_CLIENT)
check_http_success(statusCode, 201, "Error in HTTP PUT")
return
def setup():
info("Setting up GET-WebDAV test.")
local_file_path = create_local_file_to_upload()
for i in range(0,TEST_NUMFILES):
        fileURL = getURLFile(str(uuid.uuid4()))
upload_file(local_file_path, fileURL)
FILE_URLS.append(fileURL)
info("FILE_URLS contains %i elements" % len(FILE_URLS))
info("GET-WebDAV test setup completed.")
return
def get_dav(fileURL):
http_get_runner=http_get.TestRunner()
statusCode = http_get_runner(fileURL,DAV_CLIENT)
check_http_success(statusCode, 200, "Error in HTTP GET")
class TestRunner:
def __init__(self):
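        # Grinder barriers keep the worker threads in step: every thread waits
        # at "InitializationDone" until thread 0 has uploaded the test files,
        # and at "ExecutionDone" before thread 0 deletes them again.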
self.initBarrier = grinder.barrier("InitializationDone")
self.delBarrier = grinder.barrier("ExecutionDone")
if (grinder.threadNumber == 0):
create_test_directory_if_needed()
            # setup() returns None and fills the module-level FILE_URLS list
            # in place, so there is nothing useful to assign here.
            setup()
def __call__(self):
if (grinder.runNumber == 0):
self.initBarrier.await()
try:
test = Test(TestID.GET_DAV, "StoRM GET WebDAV test")
test.record(get_dav)
get_dav(random.choice(FILE_URLS))
except Exception, e:
error("Error executing get-dav: %s" % traceback.format_exc())
def __del__(self):
self.delBarrier.await()
if (grinder.threadNumber == 0):
info("Thread num. %i is the deleter" % grinder.threadNumber)
for i in range(0,TEST_NUMFILES):
info("file to remove: %s" % FILE_URLS[i])
DAV_CLIENT.delete(FILE_URLS[i]) | italiangrid/grinder-load-testsuite | storm/get-dav/getdav.py | Python | apache-2.0 | 3,998 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import pytest
from cryptography.exceptions import (
AlreadyFinalized, InvalidKey, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand
from ...utils import raises_unsupported_algorithm
@pytest.mark.requires_backend_interface(interface=HMACBackend)
class TestHKDF(object):
def test_length_limit(self, backend):
big_length = 255 * (hashes.SHA256().digest_size // 8) + 1
with pytest.raises(ValueError):
HKDF(
hashes.SHA256(),
big_length,
salt=None,
info=None,
backend=backend
)
def test_already_finalized(self, backend):
hkdf = HKDF(
hashes.SHA256(),
16,
salt=None,
info=None,
backend=backend
)
hkdf.derive(b"\x01" * 16)
with pytest.raises(AlreadyFinalized):
hkdf.derive(b"\x02" * 16)
hkdf = HKDF(
hashes.SHA256(),
16,
salt=None,
info=None,
backend=backend
)
hkdf.verify(b"\x01" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")
with pytest.raises(AlreadyFinalized):
hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")
hkdf = HKDF(
hashes.SHA256(),
16,
salt=None,
info=None,
backend=backend
)
def test_verify(self, backend):
hkdf = HKDF(
hashes.SHA256(),
16,
salt=None,
info=None,
backend=backend
)
hkdf.verify(b"\x01" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")
def test_verify_invalid(self, backend):
hkdf = HKDF(
hashes.SHA256(),
16,
salt=None,
info=None,
backend=backend
)
with pytest.raises(InvalidKey):
hkdf.verify(b"\x02" * 16, b"gJ\xfb{\xb1Oi\xc5sMC\xb7\xe4@\xf7u")
def test_unicode_typeerror(self, backend):
with pytest.raises(TypeError):
HKDF(
hashes.SHA256(),
16,
salt=u"foo",
info=None,
backend=backend
)
with pytest.raises(TypeError):
HKDF(
hashes.SHA256(),
16,
salt=None,
info=u"foo",
backend=backend
)
with pytest.raises(TypeError):
hkdf = HKDF(
hashes.SHA256(),
16,
salt=None,
info=None,
backend=backend
)
hkdf.derive(u"foo")
with pytest.raises(TypeError):
hkdf = HKDF(
hashes.SHA256(),
16,
salt=None,
info=None,
backend=backend
)
hkdf.verify(u"foo", b"bar")
with pytest.raises(TypeError):
hkdf = HKDF(
hashes.SHA256(),
16,
salt=None,
info=None,
backend=backend
)
hkdf.verify(b"foo", u"bar")
def test_derive_short_output(self, backend):
hkdf = HKDF(
hashes.SHA256(),
4,
salt=None,
info=None,
backend=backend
)
assert hkdf.derive(b"\x01" * 16) == b"gJ\xfb{"
@pytest.mark.requires_backend_interface(interface=HMACBackend)
class TestHKDFExpand(object):
def test_derive(self, backend):
prk = binascii.unhexlify(
b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
)
okm = (b"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c"
b"5bf34007208d5b887185865")
info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)
assert binascii.hexlify(hkdf.derive(prk)) == okm
def test_verify(self, backend):
prk = binascii.unhexlify(
b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
)
okm = (b"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c"
b"5bf34007208d5b887185865")
info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)
assert hkdf.verify(prk, binascii.unhexlify(okm)) is None
def test_invalid_verify(self, backend):
prk = binascii.unhexlify(
b"077709362c2e32df0ddc3f0dc47bba6390b6c73bb50f9c3122ec844ad7c2b3e5"
)
info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)
with pytest.raises(InvalidKey):
hkdf.verify(prk, b"wrong key")
def test_already_finalized(self, backend):
info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)
hkdf.derive(b"first")
with pytest.raises(AlreadyFinalized):
hkdf.derive(b"second")
def test_unicode_error(self, backend):
info = binascii.unhexlify(b"f0f1f2f3f4f5f6f7f8f9")
hkdf = HKDFExpand(hashes.SHA256(), 42, info, backend)
with pytest.raises(TypeError):
hkdf.derive(u"first")
def test_invalid_backend():
pretend_backend = object()
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
HKDF(hashes.SHA256(), 16, None, None, pretend_backend)
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
HKDFExpand(hashes.SHA256(), 16, None, pretend_backend)
| hipnusleo/laserjet | resource/pypi/cryptography-1.7.1/tests/hazmat/primitives/test_hkdf.py | Python | apache-2.0 | 6,389 |
"""Support for LIFX lights."""
import asyncio
from datetime import timedelta
from functools import partial
import logging
import math
import sys
import aiolifx as aiolifx_module
import aiolifx_effects as aiolifx_effects_module
import voluptuous as vol
from homeassistant import util
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_NAME,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_XY_COLOR,
COLOR_GROUP,
DOMAIN,
LIGHT_TURN_ON_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
VALID_BRIGHTNESS,
VALID_BRIGHTNESS_PCT,
Light,
preprocess_turn_on_alternatives,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_MODE,
ENTITY_MATCH_ALL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.service import async_extract_entity_ids
import homeassistant.util.color as color_util
from . import (
CONF_BROADCAST,
CONF_PORT,
CONF_SERVER,
DATA_LIFX_MANAGER,
DOMAIN as LIFX_DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
DISCOVERY_INTERVAL = 60
MESSAGE_TIMEOUT = 1.0
MESSAGE_RETRIES = 8
UNAVAILABLE_GRACE = 90
SERVICE_LIFX_SET_STATE = "set_state"
ATTR_INFRARED = "infrared"
ATTR_ZONES = "zones"
ATTR_POWER = "power"
LIFX_SET_STATE_SCHEMA = cv.make_entity_service_schema(
{
**LIGHT_TURN_ON_SCHEMA,
ATTR_INFRARED: vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255)),
ATTR_ZONES: vol.All(cv.ensure_list, [cv.positive_int]),
ATTR_POWER: cv.boolean,
}
)
SERVICE_EFFECT_PULSE = "effect_pulse"
SERVICE_EFFECT_COLORLOOP = "effect_colorloop"
SERVICE_EFFECT_STOP = "effect_stop"
ATTR_POWER_ON = "power_on"
ATTR_PERIOD = "period"
ATTR_CYCLES = "cycles"
ATTR_SPREAD = "spread"
ATTR_CHANGE = "change"
PULSE_MODE_BLINK = "blink"
PULSE_MODE_BREATHE = "breathe"
PULSE_MODE_PING = "ping"
PULSE_MODE_STROBE = "strobe"
PULSE_MODE_SOLID = "solid"
PULSE_MODES = [
PULSE_MODE_BLINK,
PULSE_MODE_BREATHE,
PULSE_MODE_PING,
PULSE_MODE_STROBE,
PULSE_MODE_SOLID,
]
LIFX_EFFECT_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_POWER_ON, default=True): cv.boolean,
}
)
LIFX_EFFECT_PULSE_SCHEMA = LIFX_EFFECT_SCHEMA.extend(
{
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence((cv.small_float, cv.small_float)), vol.Coerce(tuple)
),
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP): vol.All(
vol.ExactSequence(
(
vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
)
),
vol.Coerce(tuple),
),
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP): vol.All(
vol.Coerce(int), vol.Range(min=0)
),
ATTR_PERIOD: vol.All(vol.Coerce(float), vol.Range(min=0.05)),
ATTR_CYCLES: vol.All(vol.Coerce(float), vol.Range(min=1)),
ATTR_MODE: vol.In(PULSE_MODES),
}
)
LIFX_EFFECT_COLORLOOP_SCHEMA = LIFX_EFFECT_SCHEMA.extend(
{
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
ATTR_PERIOD: vol.All(vol.Coerce(float), vol.Clamp(min=0.05)),
ATTR_CHANGE: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=360)),
ATTR_SPREAD: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=360)),
ATTR_TRANSITION: vol.All(vol.Coerce(float), vol.Range(min=0)),
}
)
LIFX_EFFECT_STOP_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
def aiolifx():
"""Return the aiolifx module."""
return aiolifx_module
def aiolifx_effects():
"""Return the aiolifx_effects module."""
return aiolifx_effects_module
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the LIFX light platform. Obsolete."""
_LOGGER.warning("LIFX no longer works with light platform configuration.")
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up LIFX from a config entry."""
if sys.platform == "win32":
_LOGGER.warning(
"The lifx platform is known to not work on Windows. "
"Consider using the lifx_legacy platform instead"
)
# Priority 1: manual config
interfaces = hass.data[LIFX_DOMAIN].get(DOMAIN)
if not interfaces:
# Priority 2: scanned interfaces
lifx_ip_addresses = await aiolifx().LifxScan(hass.loop).scan()
interfaces = [{CONF_SERVER: ip} for ip in lifx_ip_addresses]
if not interfaces:
# Priority 3: default interface
interfaces = [{}]
lifx_manager = LIFXManager(hass, async_add_entities)
hass.data[DATA_LIFX_MANAGER] = lifx_manager
for interface in interfaces:
lifx_manager.start_discovery(interface)
return True
def lifx_features(bulb):
"""Return a feature map for this bulb, or a default map if unknown."""
return aiolifx().products.features_map.get(
bulb.product
) or aiolifx().products.features_map.get(1)
def find_hsbk(**kwargs):
"""Find the desired color from a number of possible inputs."""
hue, saturation, brightness, kelvin = [None] * 4
preprocess_turn_on_alternatives(kwargs)
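    # Home Assistant passes hue in degrees (0-360), saturation in percent and
    # brightness as an 8-bit value; LIFX wants all of them as 16-bit HSBK
    # components plus a kelvin temperature, hence the scaling below.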
if ATTR_HS_COLOR in kwargs:
hue, saturation = kwargs[ATTR_HS_COLOR]
hue = int(hue / 360 * 65535)
saturation = int(saturation / 100 * 65535)
kelvin = 3500
if ATTR_COLOR_TEMP in kwargs:
kelvin = int(
color_util.color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
)
saturation = 0
if ATTR_BRIGHTNESS in kwargs:
brightness = convert_8_to_16(kwargs[ATTR_BRIGHTNESS])
hsbk = [hue, saturation, brightness, kelvin]
return None if hsbk == [None] * 4 else hsbk
def merge_hsbk(base, change):
"""Copy change on top of base, except when None."""
if change is None:
return None
return [b if c is None else c for b, c in zip(base, change)]
class LIFXManager:
"""Representation of all known LIFX entities."""
def __init__(self, hass, async_add_entities):
"""Initialize the light."""
self.entities = {}
self.hass = hass
self.async_add_entities = async_add_entities
self.effects_conductor = aiolifx_effects().Conductor(hass.loop)
self.discoveries = []
self.cleanup_unsub = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, self.cleanup
)
self.register_set_state()
self.register_effects()
def start_discovery(self, interface):
"""Start discovery on a network interface."""
kwargs = {"discovery_interval": DISCOVERY_INTERVAL}
broadcast_ip = interface.get(CONF_BROADCAST)
if broadcast_ip:
kwargs["broadcast_ip"] = broadcast_ip
lifx_discovery = aiolifx().LifxDiscovery(self.hass.loop, self, **kwargs)
kwargs = {}
listen_ip = interface.get(CONF_SERVER)
if listen_ip:
kwargs["listen_ip"] = listen_ip
listen_port = interface.get(CONF_PORT)
if listen_port:
kwargs["listen_port"] = listen_port
lifx_discovery.start(**kwargs)
self.discoveries.append(lifx_discovery)
@callback
def cleanup(self, event=None):
"""Release resources."""
self.cleanup_unsub()
for discovery in self.discoveries:
discovery.cleanup()
for service in [
SERVICE_LIFX_SET_STATE,
SERVICE_EFFECT_STOP,
SERVICE_EFFECT_PULSE,
SERVICE_EFFECT_COLORLOOP,
]:
self.hass.services.async_remove(LIFX_DOMAIN, service)
def register_set_state(self):
"""Register the LIFX set_state service call."""
async def service_handler(service):
"""Apply a service."""
tasks = []
for light in await self.async_service_to_entities(service):
if service.service == SERVICE_LIFX_SET_STATE:
task = light.set_state(**service.data)
tasks.append(self.hass.async_create_task(task))
if tasks:
await asyncio.wait(tasks)
self.hass.services.async_register(
LIFX_DOMAIN,
SERVICE_LIFX_SET_STATE,
service_handler,
schema=LIFX_SET_STATE_SCHEMA,
)
def register_effects(self):
"""Register the LIFX effects as hass service calls."""
async def service_handler(service):
"""Apply a service, i.e. start an effect."""
entities = await self.async_service_to_entities(service)
if entities:
await self.start_effect(entities, service.service, **service.data)
self.hass.services.async_register(
LIFX_DOMAIN,
SERVICE_EFFECT_PULSE,
service_handler,
schema=LIFX_EFFECT_PULSE_SCHEMA,
)
self.hass.services.async_register(
LIFX_DOMAIN,
SERVICE_EFFECT_COLORLOOP,
service_handler,
schema=LIFX_EFFECT_COLORLOOP_SCHEMA,
)
self.hass.services.async_register(
LIFX_DOMAIN,
SERVICE_EFFECT_STOP,
service_handler,
schema=LIFX_EFFECT_STOP_SCHEMA,
)
async def start_effect(self, entities, service, **kwargs):
"""Start a light effect on entities."""
bulbs = [light.bulb for light in entities]
if service == SERVICE_EFFECT_PULSE:
effect = aiolifx_effects().EffectPulse(
power_on=kwargs.get(ATTR_POWER_ON),
period=kwargs.get(ATTR_PERIOD),
cycles=kwargs.get(ATTR_CYCLES),
mode=kwargs.get(ATTR_MODE),
hsbk=find_hsbk(**kwargs),
)
await self.effects_conductor.start(effect, bulbs)
elif service == SERVICE_EFFECT_COLORLOOP:
preprocess_turn_on_alternatives(kwargs)
brightness = None
if ATTR_BRIGHTNESS in kwargs:
brightness = convert_8_to_16(kwargs[ATTR_BRIGHTNESS])
effect = aiolifx_effects().EffectColorloop(
power_on=kwargs.get(ATTR_POWER_ON),
period=kwargs.get(ATTR_PERIOD),
change=kwargs.get(ATTR_CHANGE),
spread=kwargs.get(ATTR_SPREAD),
transition=kwargs.get(ATTR_TRANSITION),
brightness=brightness,
)
await self.effects_conductor.start(effect, bulbs)
elif service == SERVICE_EFFECT_STOP:
await self.effects_conductor.stop(bulbs)
async def async_service_to_entities(self, service):
"""Return the known entities that a service call mentions."""
if service.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_ALL:
return self.entities.values()
entity_ids = await async_extract_entity_ids(self.hass, service)
return [
entity
for entity in self.entities.values()
if entity.entity_id in entity_ids
]
@callback
def register(self, bulb):
"""Handle aiolifx detected bulb."""
self.hass.async_create_task(self.register_new_bulb(bulb))
async def register_new_bulb(self, bulb):
"""Handle newly detected bulb."""
if bulb.mac_addr in self.entities:
entity = self.entities[bulb.mac_addr]
entity.registered = True
_LOGGER.debug("%s register AGAIN", entity.who)
await entity.update_hass()
else:
_LOGGER.debug("%s register NEW", bulb.ip_addr)
# Read initial state
ack = AwaitAioLIFX().wait
color_resp = await ack(bulb.get_color)
if color_resp:
version_resp = await ack(bulb.get_version)
if color_resp is None or version_resp is None:
_LOGGER.error("Failed to initialize %s", bulb.ip_addr)
bulb.registered = False
else:
bulb.timeout = MESSAGE_TIMEOUT
bulb.retry_count = MESSAGE_RETRIES
bulb.unregister_timeout = UNAVAILABLE_GRACE
if lifx_features(bulb)["multizone"]:
entity = LIFXStrip(bulb, self.effects_conductor)
elif lifx_features(bulb)["color"]:
entity = LIFXColor(bulb, self.effects_conductor)
else:
entity = LIFXWhite(bulb, self.effects_conductor)
_LOGGER.debug("%s register READY", entity.who)
self.entities[bulb.mac_addr] = entity
self.async_add_entities([entity], True)
@callback
def unregister(self, bulb):
"""Handle aiolifx disappearing bulbs."""
if bulb.mac_addr in self.entities:
entity = self.entities[bulb.mac_addr]
_LOGGER.debug("%s unregister", entity.who)
entity.registered = False
self.hass.async_create_task(entity.async_update_ha_state())
class AwaitAioLIFX:
"""Wait for an aiolifx callback and return the message."""
def __init__(self):
"""Initialize the wrapper."""
self.message = None
self.event = asyncio.Event()
@callback
def callback(self, bulb, message):
"""Handle responses."""
self.message = message
self.event.set()
async def wait(self, method):
"""Call an aiolifx method and wait for its response."""
self.message = None
self.event.clear()
method(callb=self.callback)
await self.event.wait()
return self.message
def convert_8_to_16(value):
"""Scale an 8 bit level into 16 bits."""
return (value << 8) | value
def convert_16_to_8(value):
"""Scale a 16 bit level into 8 bits."""
return value >> 8
class LIFXLight(Light):
"""Representation of a LIFX light."""
def __init__(self, bulb, effects_conductor):
"""Initialize the light."""
self.bulb = bulb
self.effects_conductor = effects_conductor
self.registered = True
self.postponed_update = None
self.lock = asyncio.Lock()
@property
def device_info(self):
"""Return information about the device."""
info = {
"identifiers": {(LIFX_DOMAIN, self.unique_id)},
"name": self.name,
"connections": {(dr.CONNECTION_NETWORK_MAC, self.bulb.mac_addr)},
"manufacturer": "LIFX",
}
model = aiolifx().products.product_map.get(self.bulb.product)
if model is not None:
info["model"] = model
return info
@property
def available(self):
"""Return the availability of the bulb."""
return self.registered
@property
def unique_id(self):
"""Return a unique ID."""
return self.bulb.mac_addr
@property
def name(self):
"""Return the name of the bulb."""
return self.bulb.label
@property
def who(self):
"""Return a string identifying the bulb."""
return f"{self.bulb.ip_addr} ({self.name})"
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
kelvin = lifx_features(self.bulb)["max_kelvin"]
return math.floor(color_util.color_temperature_kelvin_to_mired(kelvin))
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
kelvin = lifx_features(self.bulb)["min_kelvin"]
return math.ceil(color_util.color_temperature_kelvin_to_mired(kelvin))
@property
def supported_features(self):
"""Flag supported features."""
support = SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_EFFECT
bulb_features = lifx_features(self.bulb)
if bulb_features["min_kelvin"] != bulb_features["max_kelvin"]:
support |= SUPPORT_COLOR_TEMP
return support
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
fade = self.bulb.power_level / 65535
return convert_16_to_8(int(fade * self.bulb.color[2]))
@property
def color_temp(self):
"""Return the color temperature."""
_, sat, _, kelvin = self.bulb.color
if sat:
return None
return color_util.color_temperature_kelvin_to_mired(kelvin)
@property
def is_on(self):
"""Return true if light is on."""
return self.bulb.power_level != 0
@property
def effect(self):
"""Return the name of the currently running effect."""
effect = self.effects_conductor.effect(self.bulb)
if effect:
return "lifx_effect_" + effect.name
return None
async def update_hass(self, now=None):
"""Request new status and push it to hass."""
self.postponed_update = None
await self.async_update()
await self.async_update_ha_state()
async def update_during_transition(self, when):
"""Update state at the start and end of a transition."""
if self.postponed_update:
self.postponed_update()
# Transition has started
await self.update_hass()
# Transition has ended
if when > 0:
self.postponed_update = async_track_point_in_utc_time(
self.hass,
self.update_hass,
util.dt.utcnow() + timedelta(milliseconds=when),
)
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
kwargs[ATTR_POWER] = True
self.hass.async_create_task(self.set_state(**kwargs))
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
kwargs[ATTR_POWER] = False
self.hass.async_create_task(self.set_state(**kwargs))
async def set_state(self, **kwargs):
"""Set a color on the light and turn it on/off."""
async with self.lock:
bulb = self.bulb
await self.effects_conductor.stop([bulb])
if ATTR_EFFECT in kwargs:
await self.default_effect(**kwargs)
return
if ATTR_INFRARED in kwargs:
bulb.set_infrared(convert_8_to_16(kwargs[ATTR_INFRARED]))
if ATTR_TRANSITION in kwargs:
fade = int(kwargs[ATTR_TRANSITION] * 1000)
else:
fade = 0
# These are both False if ATTR_POWER is not set
power_on = kwargs.get(ATTR_POWER, False)
power_off = not kwargs.get(ATTR_POWER, True)
hsbk = find_hsbk(**kwargs)
# Send messages, waiting for ACK each time
ack = AwaitAioLIFX().wait
if not self.is_on:
if power_off:
await self.set_power(ack, False)
if hsbk:
await self.set_color(ack, hsbk, kwargs)
if power_on:
await self.set_power(ack, True, duration=fade)
else:
if power_on:
await self.set_power(ack, True)
if hsbk:
await self.set_color(ack, hsbk, kwargs, duration=fade)
if power_off:
await self.set_power(ack, False, duration=fade)
# Avoid state ping-pong by holding off updates as the state settles
await asyncio.sleep(0.3)
# Update when the transition starts and ends
await self.update_during_transition(fade)
async def set_power(self, ack, pwr, duration=0):
"""Send a power change to the bulb."""
await ack(partial(self.bulb.set_power, pwr, duration=duration))
async def set_color(self, ack, hsbk, kwargs, duration=0):
"""Send a color change to the bulb."""
hsbk = merge_hsbk(self.bulb.color, hsbk)
await ack(partial(self.bulb.set_color, hsbk, duration=duration))
async def default_effect(self, **kwargs):
"""Start an effect with default parameters."""
service = kwargs[ATTR_EFFECT]
data = {ATTR_ENTITY_ID: self.entity_id}
await self.hass.services.async_call(LIFX_DOMAIN, service, data)
async def async_update(self):
"""Update bulb status."""
if self.available and not self.lock.locked():
await AwaitAioLIFX().wait(self.bulb.get_color)
class LIFXWhite(LIFXLight):
"""Representation of a white-only LIFX light."""
@property
def effect_list(self):
"""Return the list of supported effects for this light."""
return [SERVICE_EFFECT_PULSE, SERVICE_EFFECT_STOP]
class LIFXColor(LIFXLight):
"""Representation of a color LIFX light."""
@property
def supported_features(self):
"""Flag supported features."""
support = super().supported_features
support |= SUPPORT_COLOR
return support
@property
def effect_list(self):
"""Return the list of supported effects for this light."""
return [SERVICE_EFFECT_COLORLOOP, SERVICE_EFFECT_PULSE, SERVICE_EFFECT_STOP]
@property
def hs_color(self):
"""Return the hs value."""
hue, sat, _, _ = self.bulb.color
hue = hue / 65535 * 360
sat = sat / 65535 * 100
return (hue, sat) if sat else None
class LIFXStrip(LIFXColor):
"""Representation of a LIFX light strip with multiple zones."""
async def set_color(self, ack, hsbk, kwargs, duration=0):
"""Send a color change to the bulb."""
bulb = self.bulb
num_zones = len(bulb.color_zones)
zones = kwargs.get(ATTR_ZONES)
if zones is None:
# Fast track: setting all zones to the same brightness and color
# can be treated as a single-zone bulb.
if hsbk[2] is not None and hsbk[3] is not None:
await super().set_color(ack, hsbk, kwargs, duration)
return
zones = list(range(0, num_zones))
else:
zones = [x for x in set(zones) if x < num_zones]
# Zone brightness is not reported when powered off
if not self.is_on and hsbk[2] is None:
await self.set_power(ack, True)
await asyncio.sleep(0.3)
await self.update_color_zones()
await self.set_power(ack, False)
await asyncio.sleep(0.3)
# Send new color to each zone
for index, zone in enumerate(zones):
zone_hsbk = merge_hsbk(bulb.color_zones[zone], hsbk)
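            # In the LIFX multizone protocol apply=0 only buffers the change;
            # apply=1 on the last message commits every buffered zone at once.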
apply = 1 if (index == len(zones) - 1) else 0
set_zone = partial(
bulb.set_color_zones,
start_index=zone,
end_index=zone,
color=zone_hsbk,
duration=duration,
apply=apply,
)
await ack(set_zone)
async def async_update(self):
"""Update strip status."""
if self.available and not self.lock.locked():
await super().async_update()
await self.update_color_zones()
async def update_color_zones(self):
"""Get updated color information for each zone."""
zone = 0
top = 1
while self.available and zone < top:
# Each get_color_zones can update 8 zones at once
resp = await AwaitAioLIFX().wait(
partial(self.bulb.get_color_zones, start_index=zone)
)
if resp:
zone += 8
top = resp.count
# We only await multizone responses so don't ask for just one
if zone == top - 1:
zone -= 1
| leppa/home-assistant | homeassistant/components/lifx/light.py | Python | apache-2.0 | 24,645 |
#!/usr/bin/env python
#
# Copyright 2014 cloudysunny14.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ryu.base import app_manager
from ryu.controller import dpset
from ryu.controller.handler import set_ev_cls
from ryu.exception import OFPUnknownVersion
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.lib import hub
from ryu.lib.ovs import bridge
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from lib import qoslib
LOG = logging.getLogger(__name__)
LOG_TEST_FINISH = 'TEST_FINISHED: Tests=[%s] (OK=%s NG=%s SKIP=%s)'
OVSDB_ADDR = 'tcp:127.0.0.1:6632'
class OFMangleTester(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {'dpset': dpset.DPSet,
'qoslib': qoslib.QoSLib}
_OFCTL = {ofproto_v1_0.OFP_VERSION: ofctl_v1_0,
ofproto_v1_2.OFP_VERSION: ofctl_v1_2,
ofproto_v1_3.OFP_VERSION: ofctl_v1_3}
def __init__(self, *args, **kwargs):
super(OFMangleTester, self).__init__(*args, **kwargs)
self.dpset = kwargs['dpset']
self.qoslib = kwargs['qoslib']
self.qoslib.use_switch_flow = False
self.waiters = {}
self.pending = []
self.results = {}
for t in dir(self):
if t.startswith("test_"):
self.pending.append(t)
self.pending.sort(reverse=True)
@set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
def datapath_handler(self, ev):
# Target switch datapath
self.dp = ev.dp
version = self.dp.ofproto.OFP_VERSION
if version not in self._OFCTL:
raise OFPUnknownVersion(version=version)
self.ofctl = self._OFCTL[version]
hub.spawn(self._do_test)
def test_queue_setup(self):
self.ovsctl = bridge.OVSBridge(self.dp.id, OVSDB_ADDR)
queue = qoslib.QoSLib.queue_tree(self.ovsctl, self.dp)
queue.queue('high-priority', '500', '500')
self.qoslib.register_queue(queue)
queue = qoslib.QoSLib.queue_tree(self.ovsctl, self.dp)
queue.queue('high-priority', '700', '700')
self.qoslib.register_queue(queue)
queue = qoslib.QoSLib.queue_tree(self.ovsctl, self.dp)
queue.queue('best-effort', '10000', '10000')
def _print_results(self):
LOG.info("TEST_RESULTS:")
ok = 0
ng = 0
skip = 0
for t in sorted(self.results.keys()):
if self.results[t] is True:
ok += 1
else:
ng += 1
LOG.info(" %s: %s", t, self.results[t])
LOG.info(LOG_TEST_FINISH, len(self.pending), ok, ng, skip)
def _do_test(self):
""""""
for test in self.pending:
self.results[test] = getattr(self, test)()
self._print_results()
| cloudysunny14/faucet | tests/test_ovs_mangle.py | Python | apache-2.0 | 3,411 |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Utils exporting data from AFF4 to the rest of the world."""
import os
import Queue
import stat
import time
import logging
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import serialize
from grr.lib import threadpool
from grr.lib import type_info
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
BUFFER_SIZE = 16 * 1024 * 1024
def GetAllClients(token=None):
"""Return a list of all client urns."""
results = []
for urn in aff4.FACTORY.Open(aff4.ROOT_URN, token=token).ListChildren():
try:
results.append(rdfvalue.ClientURN(urn))
except type_info.TypeValueError:
pass
return results
class IterateAllClientUrns(object):
"""Class to iterate over all URNs."""
THREAD_POOL_NAME = "ClientUrnIter"
QUEUE_TIMEOUT = 30
def __init__(self, func=None, max_threads=10, token=None):
"""Iterate over all clients in a threadpool.
Args:
func: A function to call with each client urn.
max_threads: Number of threads to use.
token: Auth token.
Raises:
RuntimeError: If function not specified.
"""
self.thread_pool = threadpool.ThreadPool.Factory(self.THREAD_POOL_NAME,
max_threads)
self.thread_pool.Start()
self.token = token
self.func = func
self.broken_subjects = [] # Entries that are broken or fail to run.
self.out_queue = Queue.Queue()
def GetInput(self):
"""Yield client urns."""
clients = GetAllClients(token=self.token)
logging.debug("Got %d clients", len(clients))
return clients
def Run(self):
"""Run the iteration."""
count = 0
for count, input_data in enumerate(self.GetInput()):
if count % 2000 == 0:
logging.debug("%d processed.", count)
args = (input_data, self.out_queue, self.token)
self.thread_pool.AddTask(target=self.IterFunction, args=args,
name=self.THREAD_POOL_NAME)
while count >= 0:
try:
# We only use the timeout to wait if we got to the end of the Queue but
# didn't process everything yet.
out = self.out_queue.get(timeout=self.QUEUE_TIMEOUT, block=True)
if out:
yield out
count -= 1
except Queue.Empty:
break
# Join and stop to clean up the threadpool.
self.thread_pool.Stop()
def IterFunction(self, *args):
"""Function to run on each input. This can be overridden."""
self.func(*args)
class IterateAllClients(IterateAllClientUrns):
"""Class to iterate over all GRR Client objects."""
def __init__(self, max_age, client_chunksize=25, **kwargs):
"""Iterate over all clients in a threadpool.
Args:
max_age: Maximum age in seconds of clients to check.
client_chunksize: A function to call with each client urn.
**kwargs: Arguments passed to init.
"""
super(IterateAllClients, self).__init__(**kwargs)
self.client_chunksize = client_chunksize
self.max_age = max_age
def GetInput(self):
"""Yield client urns."""
client_list = GetAllClients(token=self.token)
logging.debug("Got %d clients", len(client_list))
for client_group in utils.Grouper(client_list, self.client_chunksize):
for fd in aff4.FACTORY.MultiOpen(client_group, mode="r",
aff4_type="VFSGRRClient",
token=self.token):
if isinstance(fd, aff4_grr.VFSGRRClient):
# Skip if older than max_age
oldest_time = (time.time() - self.max_age) * 1e6
if fd.Get(aff4.VFSGRRClient.SchemaCls.PING) >= oldest_time:
yield fd
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):
"""Download an aff4 file to the local filesystem overwriting it if it exists.
Args:
file_obj: An aff4 object that supports the file interface (Read, Seek)
target_path: Full path of file to write to.
buffer_size: Read in chunks this size.
"""
logging.info("Downloading: %s to: %s", file_obj.urn, target_path)
target_file = open(target_path, "w")
file_obj.Seek(0)
count = 0
data_buffer = file_obj.Read(buffer_size)
while data_buffer:
target_file.write(data_buffer)
data_buffer = file_obj.Read(buffer_size)
count += 1
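    # Log progress only every few buffers to keep the output manageable.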
if not count % 3:
logging.debug("Downloading: %s: %s done", file_obj.urn,
utils.FormatNumberAsString(count*buffer_size))
target_file.close()
def RecursiveDownload(dir_obj, target_dir, max_depth=10, depth=1,
overwrite=False, max_threads=10):
"""Recursively downloads a file entry to the target path.
Args:
dir_obj: An aff4 object that contains children.
target_dir: Full path of the directory to write to.
max_depth: Depth to download to. 1 means just the directory itself.
depth: Current depth of recursion.
overwrite: Should we overwrite files that exist.
max_threads: Use this many threads to do the downloads.
"""
if (not isinstance(dir_obj, aff4.AFF4Volume) or
isinstance(dir_obj, aff4.HashImage)):
return
# Reuse the same threadpool as we call recursively.
thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
thread_pool.Start()
for sub_file_entry in dir_obj.OpenChildren():
path_elements = [target_dir]
sub_target_dir = u"/".join(path_elements)
try:
# Any file-like object with data in AFF4 should inherit AFF4Stream.
if isinstance(sub_file_entry, aff4.AFF4Stream):
args = (sub_file_entry.urn, sub_target_dir, sub_file_entry.token,
overwrite)
thread_pool.AddTask(target=CopyAFF4ToLocal, args=args,
name="Downloader")
elif "Container" in sub_file_entry.behaviours:
if depth >= max_depth: # Don't go any deeper.
continue
try:
os.makedirs(sub_target_dir)
except OSError:
pass
RecursiveDownload(sub_file_entry, sub_target_dir, overwrite=overwrite,
depth=depth+1)
except IOError:
logging.exception("Unable to download %s", sub_file_entry.urn)
finally:
sub_file_entry.Close()
# Join and stop the threadpool.
if depth <= 1:
thread_pool.Stop()
def DownloadCollection(coll_path, target_path, token=None, overwrite=False,
dump_client_info=False, max_threads=10):
"""Iterate through a Collection object downloading all files.
Args:
coll_path: Path to an AFF4 collection.
target_path: Base directory to write to.
token: Token for access.
overwrite: If True, overwrite existing files.
dump_client_info: If True, this will detect client paths, and dump a yaml
version of the client object to the root path. This is useful for seeing
the hostname/users of the machine the client id refers to.
max_threads: Use this many threads to do the downloads.
"""
completed_clients = set()
try:
coll = aff4.FACTORY.Open(coll_path, aff4_type="RDFValueCollection",
token=token)
except IOError:
logging.error("%s is not a valid collection. Typo? "
"Are you sure something was written to it?", coll_path)
return
thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
thread_pool.Start()
logging.info("Expecting to download %s files", coll.size)
# Collections can include anything they want, but we only handle RDFURN and
# StatEntry entries in this function.
for grr_message in coll:
source = None
# If a raw message, work out the type.
if isinstance(grr_message, rdfvalue.GrrMessage):
source = grr_message.source
grr_message = grr_message.payload
# Collections can contain AFF4ObjectSummary objects which encapsulate
# RDFURNs and StatEntrys.
if isinstance(grr_message, rdfvalue.AFF4ObjectSummary):
urn = grr_message.urn
elif isinstance(grr_message, rdfvalue.RDFURN):
urn = grr_message
elif isinstance(grr_message, rdfvalue.StatEntry):
urn = rdfvalue.RDFURN(grr_message.aff4path)
elif isinstance(grr_message, rdfvalue.FileFinderResult):
urn = rdfvalue.RDFURN(grr_message.stat_entry.aff4path)
elif isinstance(grr_message, rdfvalue.RDFBytes):
try:
os.makedirs(target_path)
except OSError:
pass
try:
# We just dump out bytes and carry on.
client_id = source.Split()[0]
with open(os.path.join(target_path, client_id), "wb") as fd:
fd.write(str(grr_message))
except AttributeError:
pass
continue
else:
continue
# Handle dumping client info, but only once per client.
client_id = urn.Split()[0]
re_match = aff4.AFF4Object.VFSGRRClient.CLIENT_ID_RE.match(client_id)
if dump_client_info and re_match and client_id not in completed_clients:
args = (rdfvalue.RDFURN(client_id), target_path, token, overwrite)
thread_pool.AddTask(target=DumpClientYaml, args=args,
name="ClientYamlDownloader")
completed_clients.add(client_id)
# Now queue downloading the actual files.
args = (urn, target_path, token, overwrite)
thread_pool.AddTask(target=CopyAFF4ToLocal, args=args,
name="Downloader")
# Join and stop the threadpool.
thread_pool.Stop()
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False):
"""Copy an AFF4 object that supports a read interface to local filesystem.
Args:
aff4_urn: URN of thing to copy.
target_dir: Directory to copy the file to.
token: Auth token.
overwrite: If True overwrite the file if it exists.
By default file will only be overwritten if file size differs.
"""
try:
fd = aff4.FACTORY.Open(aff4_urn, "AFF4Stream", token=token)
filepath = os.path.join(target_dir, fd.urn.Path()[1:])
if not os.path.isfile(filepath):
try:
# Ensure directory exists.
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
DownloadFile(fd, filepath)
elif (os.stat(filepath)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE) or
overwrite):
# We should overwrite because user said, or file sizes differ.
DownloadFile(fd, filepath)
else:
logging.info("File %s exists, skipping", filepath)
except IOError as e:
logging.error("Failed to read %s due to %s", aff4_urn, e)
raise
def DumpClientYaml(client_urn, target_dir, token=None, overwrite=False):
"""Dump a yaml file containing client info."""
fd = aff4.FACTORY.Open(client_urn, "VFSGRRClient", token=token)
dirpath = os.path.join(target_dir, fd.urn.Split()[0])
try:
# Due to threading this can actually be created by another thread.
os.makedirs(dirpath)
except OSError:
pass
filepath = os.path.join(dirpath, "client_info.yaml")
if not os.path.isfile(filepath) or overwrite:
with open(filepath, "w") as out_file:
out_file.write(serialize.YamlDumper(fd))
| simsong/grr-insider | lib/export_utils.py | Python | apache-2.0 | 11,103 |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse, os
import torch
"""
Checkpoints saved by train.py contain not only model parameters but also
optimizer states, losses, a history of generated images, and other statistics.
This information is very useful for development and debugging models, but makes
the saved checkpoints very large. This utility script strips away all extra
information from saved checkpoints, keeping only the saved models.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--input_checkpoint', default=None)
parser.add_argument('--output_checkpoint', default=None)
parser.add_argument('--input_dir', default=None)
parser.add_argument('--output_dir', default=None)
parser.add_argument('--keep_discriminators', type=int, default=1)
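# Example invocations (paths are illustrative):
#   python strip_checkpoint.py --input_checkpoint ckpt.pt --output_checkpoint ckpt_small.pt
#   python strip_checkpoint.py --input_dir checkpoints/ --output_dir stripped/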
def main(args):
if args.input_checkpoint is not None:
handle_checkpoint(args, args.input_checkpoint, args.output_checkpoint)
if args.input_dir is not None:
handle_dir(args, args.input_dir, args.output_dir)
def handle_dir(args, input_dir, output_dir):
for fn in os.listdir(input_dir):
if not fn.endswith('.pt'):
continue
input_path = os.path.join(input_dir, fn)
output_path = os.path.join(output_dir, fn)
handle_checkpoint(args, input_path, output_path)
def handle_checkpoint(args, input_path, output_path):
input_checkpoint = torch.load(input_path)
keep = ['args', 'model_state', 'model_kwargs']
if args.keep_discriminators == 1:
keep += ['d_img_state', 'd_img_kwargs', 'd_obj_state', 'd_obj_kwargs']
output_checkpoint = {}
for k, v in input_checkpoint.items():
if k in keep:
output_checkpoint[k] = v
torch.save(output_checkpoint, output_path)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| google/sg2im | scripts/strip_checkpoint.py | Python | apache-2.0 | 2,313 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class MasterException(Exception):
pass
class PathException(Exception):
pass
class CommunicableException(Exception):
pass
class LiveActivityException(Exception):
pass
class StatusableException(Exception):
pass
class ActivityException(Exception):
pass
class SerializerException(Exception):
pass
class ControllerNotFoundException(Exception):
pass
class LiveActivityGroupNotFoundException(Exception):
pass
class LiveActivityNotFoundException(Exception):
pass
class ActivityNotFoundException(Exception):
pass
class SpaceNotFoundException(Exception):
pass
| EndPointCorp/interactivespaces-python-api | interactivespaces/exception.py | Python | apache-2.0 | 656 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network Hosts are responsible for allocating ips and setting up network.
There are multiple backend drivers that handle specific types of networking
topologies. All of the network commands are issued to a subclass of
:class:`NetworkManager`.
**Related Flags**
:network_driver: Driver to use for network creation
:flat_network_bridge: Bridge device for simple network instances
:flat_interface: FlatDhcp will bridge into this interface if set
:flat_network_dns: Dns for simple network
:vlan_start: First VLAN for private networks
:vpn_ip: Public IP for the cloudpipe VPN servers
:vpn_start: First Vpn port for private networks
:cnt_vpn_clients: Number of addresses reserved for vpn clients
:network_size: Number of addresses in each private subnet
:floating_range: Floating IP address block
:fixed_range: Fixed IP address block
:update_dhcp_on_disassociate: Whether to update dhcp when fixed_ip
                              is disassociated
:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
is disassociated
:create_unique_mac_address_attempts: Number of times to attempt creating
a unique mac address
"""
import datetime
import itertools
import math
import netaddr
import socket
from eventlet import greenpool
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import manager
from nova import quota
from nova import utils
from nova import rpc
from nova.network import api as network_api
from nova.compute import api as compute_api
import random
LOG = logging.getLogger("nova.network.manager")
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', None,
'Bridge for simple network instances')
flags.DEFINE_string('flat_network_dns', '8.8.4.4',
'Dns for simple network')
flags.DEFINE_bool('flat_injected', False,
'Whether to attempt to inject network setup into guest')
flags.DEFINE_string('flat_interface', None,
'FlatDhcp will bridge into this interface if set')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
flags.DEFINE_string('vlan_interface', None,
'vlans will bridge into this interface if set')
flags.DEFINE_integer('num_networks', 1, 'Number of networks to support')
flags.DEFINE_string('vpn_ip', '$my_ip',
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
flags.DEFINE_bool('multi_host', False,
'Default value for multi_host in networks')
flags.DEFINE_integer('network_size', 256,
'Number of addresses in each private subnet')
flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
flags.DEFINE_string('gateway_v6', None, 'Default IPv6 gateway')
flags.DEFINE_integer('cnt_vpn_clients', 0,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
'Driver to use for network creation')
flags.DEFINE_bool('update_dhcp_on_disassociate', False,
'Whether to update dhcp when fixed_ip is disassociated')
flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
'Seconds after which a deallocated ip is disassociated')
flags.DEFINE_integer('create_unique_mac_address_attempts', 5,
'Number of attempts to create unique mac address')
flags.DEFINE_bool('auto_assign_floating_ip', False,
'Autoassigning floating ip to VM')
flags.DEFINE_string('network_host', socket.gethostname(),
'Network host to use for ip allocation in flat modes')
flags.DEFINE_bool('fake_call', False,
'If True, skip using the queue and make local calls')
flags.DEFINE_bool('force_dhcp_release', False,
'If True, send a dhcp release on instance termination')
class AddressAlreadyAllocated(exception.Error):
"""Address was already allocated."""
pass
class RPCAllocateFixedIP(object):
"""Mixin class originally for FlatDCHP and VLAN network managers.
used since they share code to RPC.call allocate_fixed_ip on the
correct network host to configure dnsmasq
"""
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
green_pool = greenpool.GreenPool()
vpn = kwargs.get('vpn')
requested_networks = kwargs.get('requested_networks')
for network in networks:
address = None
if requested_networks is not None:
for address in (fixed_ip for (uuid, fixed_ip) in \
requested_networks if network['uuid'] == uuid):
break
# NOTE(vish): if we are not multi_host pass to the network host
if not network['multi_host']:
host = network['host']
# NOTE(vish): if there is no network host, set one
            if host is None:
host = rpc.call(context, FLAGS.network_topic,
{'method': 'set_network_host',
'args': {'network_ref': network}})
if host != self.host:
# need to call allocate_fixed_ip to correct network host
topic = self.db.queue_get_for(context,
FLAGS.network_topic,
host)
args = {}
args['instance_id'] = instance_id
args['network_id'] = network['id']
args['address'] = address
args['vpn'] = vpn
green_pool.spawn_n(rpc.call, context, topic,
{'method': '_rpc_allocate_fixed_ip',
'args': args})
else:
# i am the correct host, run here
self.allocate_fixed_ip(context, instance_id, network,
vpn=vpn, address=address)
# wait for all of the allocates (if any) to finish
green_pool.waitall()
def _rpc_allocate_fixed_ip(self, context, instance_id, network_id,
**kwargs):
"""Sits in between _allocate_fixed_ips and allocate_fixed_ip to
perform network lookup on the far side of rpc.
"""
network = self.db.network_get(context, network_id)
self.allocate_fixed_ip(context, instance_id, network, **kwargs)
class FloatingIP(object):
"""Mixin class for adding floating IP functionality to a manager."""
def init_host_floating_ips(self):
"""Configures floating ips owned by host."""
admin_context = context.get_admin_context()
try:
floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
self.host)
except exception.NotFound:
return
for floating_ip in floating_ips:
if floating_ip.get('fixed_ip', None):
fixed_address = floating_ip['fixed_ip']['address']
# NOTE(vish): The False here is because we ignore the case
# that the ip is already bound.
self.driver.bind_floating_ip(floating_ip['address'], False)
self.driver.ensure_floating_forward(floating_ip['address'],
fixed_address)
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the floating IP resources for an instance.
calls super class allocate_for_instance() as well
rpc.called by network_api
"""
instance_id = kwargs.get('instance_id')
project_id = kwargs.get('project_id')
requested_networks = kwargs.get('requested_networks')
LOG.debug(_("floating IP allocation for instance |%s|"), instance_id,
context=context)
# call the next inherited class's allocate_for_instance()
# which is currently the NetworkManager version
# do this first so fixed ip is already allocated
ips = super(FloatingIP, self).allocate_for_instance(context, **kwargs)
if FLAGS.auto_assign_floating_ip:
# allocate a floating ip (public_ip is just the address string)
public_ip = self.allocate_floating_ip(context, project_id)
# set auto_assigned column to true for the floating ip
self.db.floating_ip_set_auto_assigned(context, public_ip)
# get the floating ip object from public_ip string
floating_ip = self.db.floating_ip_get_by_address(context,
public_ip)
# get the first fixed_ip belonging to the instance
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
fixed_ip = fixed_ips[0] if fixed_ips else None
# call to correct network host to associate the floating ip
self.network_api.associate_floating_ip(context,
floating_ip,
fixed_ip,
affect_auto_assigned=True)
return ips
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating floating IP resources for an instance.
calls super class deallocate_for_instance() as well.
rpc.called by network_api
"""
instance_id = kwargs.get('instance_id')
LOG.debug(_("floating IP deallocation for instance |%s|"), instance_id,
context=context)
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
# add to kwargs so we can pass to super to save a db lookup there
kwargs['fixed_ips'] = fixed_ips
for fixed_ip in fixed_ips:
# disassociate floating ips related to fixed_ip
for floating_ip in fixed_ip.floating_ips:
address = floating_ip['address']
self.network_api.disassociate_floating_ip(context, address)
# deallocate if auto_assigned
if floating_ip['auto_assigned']:
self.network_api.release_floating_ip(context,
address,
True)
# call the next inherited class's deallocate_for_instance()
# which is currently the NetworkManager version
# call this after so floating IPs are handled first
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def allocate_floating_ip(self, context, project_id):
"""Gets an floating ip from the pool."""
# NOTE(tr3buchet): all networks hosts in zone now use the same pool
LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1))
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_('Quota exceeded for %s, tried to allocate '
'address'),
context.project_id)
raise quota.QuotaError(_('Address quota exceeded. You cannot '
'allocate any more addresses'))
# TODO(vish): add floating ips through manage command
return self.db.floating_ip_allocate_address(context,
project_id)
def associate_floating_ip(self, context, floating_address, fixed_address):
"""Associates an floating ip to a fixed ip."""
floating_ip = self.db.floating_ip_get_by_address(context,
floating_address)
if floating_ip['fixed_ip']:
raise exception.FloatingIpAlreadyInUse(
address=floating_ip['address'],
fixed_ip=floating_ip['fixed_ip']['address'])
self.db.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
self.host)
self.driver.bind_floating_ip(floating_address)
self.driver.ensure_floating_forward(floating_address, fixed_address)
def disassociate_floating_ip(self, context, floating_address):
"""Disassociates a floating ip."""
fixed_address = self.db.floating_ip_disassociate(context,
floating_address)
self.driver.unbind_floating_ip(floating_address)
self.driver.remove_floating_forward(floating_address, fixed_address)
def deallocate_floating_ip(self, context, floating_address):
"""Returns an floating ip to the pool."""
self.db.floating_ip_deallocate(context, floating_address)
class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
host management:
hosts configure themselves for networks they are assigned to in the
table upon startup. If there are networks in the table which do not
have hosts, those will be filled in and have hosts configured
as the hosts pick them up one at time during their periodic task.
The one at a time part is to flatten the layout to help scale
"""
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
# If True, this manager requires VIF to create VLAN tag.
SHOULD_CREATE_VLAN = False
timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
if not network_driver:
network_driver = FLAGS.network_driver
self.driver = utils.import_object(network_driver)
self.network_api = network_api.API()
self.compute_api = compute_api.API()
super(NetworkManager, self).__init__(service_name='network',
*args, **kwargs)
@utils.synchronized('get_dhcp')
def _get_dhcp_ip(self, context, network_ref, host=None):
"""Get the proper dhcp address to listen on."""
# NOTE(vish): this is for compatibility
if not network_ref['multi_host']:
return network_ref['gateway']
if not host:
host = self.host
network_id = network_ref['id']
try:
fip = self.db.fixed_ip_get_by_network_host(context,
network_id,
host)
return fip['address']
except exception.FixedIpNotFoundForNetworkHost:
elevated = context.elevated()
return self.db.fixed_ip_associate_pool(elevated,
network_id,
host=host)
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
# NOTE(vish): Set up networks for which this host already has
# an ip address.
ctxt = context.get_admin_context()
for network in self.db.network_get_all_by_host(ctxt, self.host):
self._setup_network(ctxt, network)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
super(NetworkManager, self).periodic_tasks(context)
if self.timeout_fixed_ips:
now = utils.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
time = now - datetime.timedelta(seconds=timeout)
num = self.db.fixed_ip_disassociate_all_by_timeout(context,
self.host,
time)
if num:
                LOG.debug(_('Disassociated %s stale fixed ip(s)'), num)
def set_network_host(self, context, network_ref):
"""Safely sets the host of the network."""
LOG.debug(_('setting network host'), context=context)
host = self.db.network_set_host(context,
network_ref['id'],
self.host)
return host
def _do_trigger_security_group_members_refresh_for_instance(self,
instance_id):
admin_context = context.get_admin_context()
instance_ref = self.db.instance_get(admin_context, instance_id)
groups = instance_ref['security_groups']
group_ids = [group['id'] for group in groups]
self.compute_api.trigger_security_group_members_refresh(admin_context,
group_ids)
def _get_networks_for_instance(self, context, instance_id, project_id,
requested_networks=None):
"""Determine & return which networks an instance should connect to."""
# TODO(tr3buchet) maybe this needs to be updated in the future if
# there is a better way to determine which networks
# a non-vlan instance should connect to
if requested_networks is not None and len(requested_networks) != 0:
network_uuids = [uuid for (uuid, fixed_ip) in requested_networks]
networks = self.db.network_get_all_by_uuids(context,
network_uuids)
else:
try:
networks = self.db.network_get_all(context)
except exception.NoNetworksFound:
return []
# return only networks which are not vlan networks
return [network for network in networks if
not network['vlan']]
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the various network resources for an instance.
rpc.called by network_api
"""
instance_id = kwargs.pop('instance_id')
host = kwargs.pop('host')
project_id = kwargs.pop('project_id')
type_id = kwargs.pop('instance_type_id')
requested_networks = kwargs.get('requested_networks')
vpn = kwargs.pop('vpn')
admin_context = context.elevated()
LOG.debug(_("network allocations for instance %s"), instance_id,
context=context)
networks = self._get_networks_for_instance(admin_context,
instance_id, project_id,
requested_networks=requested_networks)
self._allocate_mac_addresses(context, instance_id, networks)
self._allocate_fixed_ips(admin_context, instance_id,
host, networks, vpn=vpn,
requested_networks=requested_networks)
return self.get_instance_nw_info(context, instance_id, type_id, host)
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating various network resources for an instance.
rpc.called by network_api
kwargs can contain fixed_ips to circumvent another db lookup
"""
instance_id = kwargs.pop('instance_id')
try:
fixed_ips = kwargs.get('fixed_ips') or \
self.db.fixed_ip_get_by_instance(context, instance_id)
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
LOG.debug(_("network deallocation for instance |%s|"), instance_id,
context=context)
# deallocate fixed ips
for fixed_ip in fixed_ips:
self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs)
# deallocate vifs (mac addresses)
self.db.virtual_interface_delete_by_instance(context, instance_id)
def get_instance_nw_info(self, context, instance_id,
instance_type_id, host):
"""Creates network info list for instance.
        called by allocate_for_instance and network_api
context needs to be elevated
:returns: network info list [(network,info),(network,info)...]
where network = dict containing pertinent data from a network db object
and info = dict containing pertinent networking data
"""
# TODO(tr3buchet) should handle floating IPs as well?
try:
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
except exception.FixedIpNotFoundForInstance:
LOG.warn(_('No fixed IPs for instance %s'), instance_id)
fixed_ips = []
vifs = self.db.virtual_interface_get_by_instance(context, instance_id)
flavor = self.db.instance_type_get(context, instance_type_id)
network_info = []
# a vif has an address, instance_id, and network_id
# it is also joined to the instance and network given by those IDs
for vif in vifs:
network = vif['network']
if network is None:
continue
# determine which of the instance's IPs belong to this network
network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
fixed_ip['network_id'] == network['id']]
# TODO(tr3buchet) eventually "enabled" should be determined
def ip_dict(ip):
return {
'ip': ip,
'netmask': network['netmask'],
'enabled': '1'}
def ip6_dict():
return {
'ip': ipv6.to_global(network['cidr_v6'],
vif['address'],
network['project_id']),
'netmask': network['netmask_v6'],
'enabled': '1'}
network_dict = {
'bridge': network['bridge'],
'id': network['id'],
'cidr': network['cidr'],
'cidr_v6': network['cidr_v6'],
'injected': network['injected'],
'vlan': network['vlan'],
'bridge_interface': network['bridge_interface'],
'multi_host': network['multi_host']}
if network['multi_host']:
dhcp_server = self._get_dhcp_ip(context, network, host)
else:
dhcp_server = self._get_dhcp_ip(context,
network,
network['host'])
info = {
'label': network['label'],
'gateway': network['gateway'],
'dhcp_server': dhcp_server,
'broadcast': network['broadcast'],
'mac': vif['address'],
'vif_uuid': vif['uuid'],
'rxtx_cap': flavor['rxtx_cap'],
'dns': [],
'ips': [ip_dict(ip) for ip in network_IPs],
'should_create_bridge': self.SHOULD_CREATE_BRIDGE,
'should_create_vlan': self.SHOULD_CREATE_VLAN}
if network['cidr_v6']:
info['ip6s'] = [ip6_dict()]
# TODO(tr3buchet): handle ip6 routes here as well
if network['gateway_v6']:
info['gateway6'] = network['gateway_v6']
if network['dns1']:
info['dns'].append(network['dns1'])
if network['dns2']:
info['dns'].append(network['dns2'])
network_info.append((network_dict, info))
return network_info
def _allocate_mac_addresses(self, context, instance_id, networks):
"""Generates mac addresses and creates vif rows in db for them."""
for network in networks:
self.add_virtual_interface(context, instance_id, network['id'])
def add_virtual_interface(self, context, instance_id, network_id):
vif = {'address': self.generate_mac_address(),
'instance_id': instance_id,
'network_id': network_id,
'uuid': str(utils.gen_uuid())}
        # try FLAGS.create_unique_mac_address_attempts times to create a vif
        # record with a unique mac_address
for _ in xrange(FLAGS.create_unique_mac_address_attempts):
try:
return self.db.virtual_interface_create(context, vif)
except exception.VirtualInterfaceCreateException:
vif['address'] = self.generate_mac_address()
else:
self.db.virtual_interface_delete_by_instance(context,
instance_id)
raise exception.VirtualInterfaceMacAddressException()
def generate_mac_address(self):
"""Generate an Ethernet MAC address."""
mac = [0x02, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
"""Adds a fixed ip to an instance from specified network."""
networks = [self.db.network_get(context, network_id)]
self._allocate_fixed_ips(context, instance_id, host, networks)
def remove_fixed_ip_from_instance(self, context, instance_id, address):
"""Removes a fixed ip from an instance from specified network."""
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
for fixed_ip in fixed_ips:
if fixed_ip['address'] == address:
self.deallocate_fixed_ip(context, address)
return
raise exception.FixedIpNotFoundForSpecificInstance(
instance_id=instance_id, ip=address)
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
# network_get_by_compute_host
address = None
if network['cidr']:
address = kwargs.get('address', None)
if address:
address = self.db.fixed_ip_associate(context,
address, instance_id,
network['id'])
else:
address = self.db.fixed_ip_associate_pool(context.elevated(),
network['id'],
instance_id)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
get_vif = self.db.virtual_interface_get_by_instance_and_network
vif = get_vif(context, instance_id, network['id'])
values = {'allocated': True,
'virtual_interface_id': vif['id']}
self.db.fixed_ip_update(context, address, values)
self._setup_network(context, network)
return address
def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address,
{'allocated': False,
'virtual_interface_id': None})
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
instance_id = instance_ref['id']
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
if FLAGS.force_dhcp_release:
dev = self.driver.get_dev(fixed_ip_ref['network'])
vif = self.db.virtual_interface_get_by_instance_and_network(
context, instance_ref['id'], fixed_ip_ref['network']['id'])
self.driver.release_dhcp(dev, address, vif['address'])
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
instance = fixed_ip['instance']
if not instance:
raise exception.Error(_('IP %s leased that is not associated') %
address)
now = utils.utcnow()
self.db.fixed_ip_update(context,
fixed_ip['address'],
{'leased': True,
'updated_at': now})
if not fixed_ip['allocated']:
LOG.warn(_('IP |%s| leased that isn\'t allocated'), address,
context=context)
def release_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is released."""
LOG.debug(_('Released IP |%(address)s|'), locals(), context=context)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
instance = fixed_ip['instance']
if not instance:
raise exception.Error(_('IP %s released that is not associated') %
address)
if not fixed_ip['leased']:
LOG.warn(_('IP %s released that was not leased'), address,
context=context)
self.db.fixed_ip_update(context,
fixed_ip['address'],
{'leased': False})
if not fixed_ip['allocated']:
self.db.fixed_ip_disassociate(context, address)
# NOTE(vish): dhcp server isn't updated until next setup, this
            #             means there will be stale entries in the conf file
# the code below will update the file if necessary
if FLAGS.update_dhcp_on_disassociate:
network_ref = self.db.fixed_ip_get_network(context, address)
self._setup_network(context, network_ref)
def create_networks(self, context, label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None, **kwargs):
"""Create networks based on parameters."""
# NOTE(jkoelker): these are dummy values to make sure iter works
fixed_net_v4 = netaddr.IPNetwork('0/32')
fixed_net_v6 = netaddr.IPNetwork('::0/128')
subnets_v4 = []
subnets_v6 = []
subnet_bits = int(math.ceil(math.log(network_size, 2)))
if cidr_v6:
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
prefixlen_v6 = 128 - subnet_bits
subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks)
if cidr:
fixed_net_v4 = netaddr.IPNetwork(cidr)
prefixlen_v4 = 32 - subnet_bits
subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
count=num_networks))
# NOTE(jkoelker): This replaces the _validate_cidrs call and
# prevents looping multiple times
try:
nets = self.db.network_get_all(context)
except exception.NoNetworksFound:
nets = []
used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets]
def find_next(subnet):
next_subnet = subnet.next()
while next_subnet in subnets_v4:
next_subnet = next_subnet.next()
if next_subnet in fixed_net_v4:
return next_subnet
for subnet in list(subnets_v4):
if subnet in used_subnets:
next_subnet = find_next(subnet)
if next_subnet:
subnets_v4.remove(subnet)
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
raise ValueError(_('cidr already in use'))
for used_subnet in used_subnets:
if subnet in used_subnet:
msg = _('requested cidr (%(cidr)s) conflicts with '
'existing supernet (%(super)s)')
raise ValueError(msg % {'cidr': subnet,
'super': used_subnet})
if used_subnet in subnet:
next_subnet = find_next(subnet)
if next_subnet:
subnets_v4.remove(subnet)
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
msg = _('requested cidr (%(cidr)s) conflicts '
'with existing smaller cidr '
'(%(smaller)s)')
raise ValueError(msg % {'cidr': subnet,
'smaller': used_subnet})
networks = []
subnets = itertools.izip_longest(subnets_v4, subnets_v6)
for index, (subnet_v4, subnet_v6) in enumerate(subnets):
net = {}
net['bridge'] = bridge
net['bridge_interface'] = bridge_interface
net['multi_host'] = multi_host
net['dns1'] = dns1
net['dns2'] = dns2
if num_networks > 1:
net['label'] = '%s_%d' % (label, index)
else:
net['label'] = label
if cidr and subnet_v4:
net['cidr'] = str(subnet_v4)
net['netmask'] = str(subnet_v4.netmask)
net['gateway'] = str(subnet_v4[1])
net['broadcast'] = str(subnet_v4.broadcast)
net['dhcp_start'] = str(subnet_v4[2])
if cidr_v6 and subnet_v6:
net['cidr_v6'] = str(subnet_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
net['gateway_v6'] = str(gateway_v6)
else:
net['gateway_v6'] = str(subnet_v6[1])
net['netmask_v6'] = str(subnet_v6._prefixlen)
if kwargs.get('vpn', False):
# this bit here is for vlan-manager
del net['dns1']
del net['dns2']
vlan = kwargs['vlan_start'] + index
net['vpn_private_address'] = str(subnet_v4[2])
net['dhcp_start'] = str(subnet_v4[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
                # NOTE(vish): This makes ports unique across the cloud, a more
                #             robust solution would be to make them unique per ip
net['vpn_public_port'] = kwargs['vpn_start'] + index
# None if network with cidr or cidr_v6 already exists
network = self.db.network_create_safe(context, net)
if not network:
raise ValueError(_('Network already exists!'))
else:
networks.append(network)
if network and cidr and subnet_v4:
self._create_fixed_ips(context, network['id'])
return networks
def delete_network(self, context, fixed_range, require_disassociated=True):
network = db.network_get_by_cidr(context, fixed_range)
if require_disassociated and network.project_id is not None:
raise ValueError(_('Network must be disassociated from project %s'
' before delete' % network.project_id))
db.network_delete_safe(context, network.id)
@property
def _bottom_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the bottom of the range."""
return 2 # network, gateway
@property
def _top_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the top of the range."""
return 1 # broadcast
def _create_fixed_ips(self, context, network_id):
"""Create all fixed ips for network."""
network = self.db.network_get(context, network_id)
# NOTE(vish): Should these be properties of the network as opposed
# to properties of the manager class?
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
project_net = netaddr.IPNetwork(network['cidr'])
num_ips = len(project_net)
for index in range(num_ips):
address = str(project_net[index])
if index < bottom_reserved or num_ips - index < top_reserved:
reserved = True
else:
reserved = False
self.db.fixed_ip_create(context, {'network_id': network_id,
'address': address,
'reserved': reserved})
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
raise NotImplementedError()
def _setup_network(self, context, network_ref):
"""Sets up network on this host."""
raise NotImplementedError()
def validate_networks(self, context, networks):
"""check if the networks exists and host
is set to each network.
"""
if networks is None or len(networks) == 0:
return
network_uuids = [uuid for (uuid, fixed_ip) in networks]
self._get_networks_by_uuids(context, network_uuids)
for network_uuid, address in networks:
# check if the fixed IP address is valid and
# it actually belongs to the network
if address is not None:
if not utils.is_valid_ipv4(address):
raise exception.FixedIpInvalid(address=address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context,
address)
if fixed_ip_ref['network']['uuid'] != network_uuid:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_uuid)
if fixed_ip_ref['instance'] is not None:
raise exception.FixedIpAlreadyInUse(address=address)
def _get_networks_by_uuids(self, context, network_uuids):
return self.db.network_get_all_by_uuids(context, network_uuids)
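# NOTE: illustrative sketch only, not part of the original module.
# _get_networks_for_instance and validate_networks above (and the manager
# subclasses below) unpack `requested_networks` as (network_uuid, fixed_ip)
# pairs, where fixed_ip may be None to let the manager pick an address from
# the pool. The uuid below is a made-up placeholder.
def _example_requested_networks():
    return [
        ('12345678-1234-1234-1234-123456789012', '192.168.0.10'),
        ('12345678-1234-1234-1234-123456789012', None),
    ]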
class FlatManager(NetworkManager):
"""Basic network where no vlans are used.
FlatManager does not do any bridge or vlan creation. The user is
responsible for setting up whatever bridges are specified when creating
networks through nova-manage. This bridge needs to be created on all
compute hosts.
The idea is to create a single network for the host with a command like:
nova-manage network create 192.168.0.0/24 1 256. Creating multiple
    networks for one manager is currently not supported, but could be
    added by modifying allocate_fixed_ip and get_network to get a network
with new logic instead of network_get_by_bridge. Arbitrary lists of
addresses in a single network can be accomplished with manual db editing.
If flat_injected is True, the compute host will attempt to inject network
config into the guest. It attempts to modify /etc/network/interfaces and
currently only works on debian based systems. To support a wider range of
OSes, some other method may need to be devised to let the guest know which
ip it should be using so that it can configure itself. Perhaps an attached
disk or serial device with configuration info.
Metadata forwarding must be handled by the gateway, and since nova does
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
"""
timeout_fixed_ips = False
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
requested_networks = kwargs.get('requested_networks')
for network in networks:
address = None
if requested_networks is not None:
for address in (fixed_ip for (uuid, fixed_ip) in \
requested_networks if network['uuid'] == uuid):
break
self.allocate_fixed_ip(context, instance_id,
network, address=address)
def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
super(FlatManager, self).deallocate_fixed_ip(context, address,
**kwargs)
self.db.fixed_ip_disassociate(context, address)
def _setup_network(self, context, network_ref):
"""Setup Network on this host."""
net = {}
net['injected'] = FLAGS.flat_injected
self.db.network_update(context, network_ref['id'], net)
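# NOTE: illustrative sketch only, not part of the original module. It shows
# roughly how the `nova-manage network create 192.168.0.0/24 1 256` example
# from the FlatManager docstring maps onto create_networks(); the label,
# bridge and interface names are assumptions of this sketch, not values taken
# from nova-manage.
def _example_flat_network_create(manager, context):
    return manager.create_networks(context,
                                   label='public',
                                   cidr='192.168.0.0/24',
                                   multi_host=False,
                                   num_networks=1,
                                   network_size=256,
                                   cidr_v6=None,
                                   gateway_v6=None,
                                   bridge='br100',
                                   bridge_interface='eth0')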
class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
It never injects network settings into the guest. It also manages bridges.
Otherwise it behaves like FlatManager.
"""
SHOULD_CREATE_BRIDGE = True
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
self.driver.init_host()
self.driver.ensure_metadata_ip()
super(FlatDHCPManager, self).init_host()
self.init_host_floating_ips()
self.driver.metadata_forward()
def _setup_network(self, context, network_ref):
"""Sets up network on this host."""
network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
mac_address = self.generate_mac_address()
dev = self.driver.plug(network_ref, mac_address)
self.driver.initialize_gateway_device(dev, network_ref)
if not FLAGS.fake_network:
self.driver.update_dhcp(context, dev, network_ref)
if(FLAGS.use_ipv6):
self.driver.update_ra(context, dev, network_ref)
gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network_ref['id'],
{'gateway_v6': gateway})
class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
"""Vlan network with dhcp.
VlanManager is the most complicated. It will create a host-managed
vlan for each project. Each project gets its own subnet. The networks
and associated subnets are created with nova-manage using a command like:
nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks
of 16 addresses from the beginning of the 10.0.0.0 range.
A dhcp server is run for each subnet, so each project will have its own.
For this mode to be useful, each project will need a vpn to access the
instances in its subnet.
"""
SHOULD_CREATE_BRIDGE = True
SHOULD_CREATE_VLAN = True
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
self.driver.init_host()
self.driver.ensure_metadata_ip()
NetworkManager.init_host(self)
self.init_host_floating_ips()
self.driver.metadata_forward()
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
if kwargs.get('vpn', None):
address = network['vpn_private_address']
self.db.fixed_ip_associate(context,
address,
instance_id,
reserved=True)
else:
address = kwargs.get('address', None)
if address:
address = self.db.fixed_ip_associate(context, address,
instance_id,
network['id'])
else:
address = self.db.fixed_ip_associate_pool(context,
network['id'],
instance_id)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
vif = self.db.virtual_interface_get_by_instance_and_network(context,
instance_id,
network['id'])
values = {'allocated': True,
'virtual_interface_id': vif['id']}
self.db.fixed_ip_update(context, address, values)
self._setup_network(context, network)
return address
def add_network_to_project(self, context, project_id):
"""Force adds another network to a project."""
self.db.network_associate(context, project_id, force=True)
def _get_networks_for_instance(self, context, instance_id, project_id,
requested_networks=None):
"""Determine which networks an instance should connect to."""
# get networks associated with project
if requested_networks is not None and len(requested_networks) != 0:
network_uuids = [uuid for (uuid, fixed_ip) in requested_networks]
networks = self.db.network_get_all_by_uuids(context,
network_uuids,
project_id)
else:
networks = self.db.project_get_networks(context, project_id)
return networks
def create_networks(self, context, **kwargs):
"""Create networks based on parameters."""
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
if kwargs['num_networks'] + kwargs['vlan_start'] > 4094:
            raise ValueError(_('The sum of the number of networks and'
' the vlan start cannot be greater'
' than 4094'))
        # check that num_networks and network_size fit in fixed_net
fixed_net = netaddr.IPNetwork(kwargs['cidr'])
if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']:
raise ValueError(_('The network range is not big enough to fit '
                '%(num_networks)s networks. Network size is %(network_size)s') %
kwargs)
NetworkManager.create_networks(self, context, vpn=True, **kwargs)
def _setup_network(self, context, network_ref):
"""Sets up network on this host."""
if not network_ref['vpn_public_address']:
net = {}
address = FLAGS.vpn_ip
net['vpn_public_address'] = address
network_ref = db.network_update(context, network_ref['id'], net)
else:
address = network_ref['vpn_public_address']
network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
mac_address = self.generate_mac_address()
dev = self.driver.plug(network_ref, mac_address)
self.driver.initialize_gateway_device(dev, network_ref)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == FLAGS.vpn_ip and hasattr(self.driver,
"ensure_vpn_forward"):
self.driver.ensure_vpn_forward(FLAGS.vpn_ip,
network_ref['vpn_public_port'],
network_ref['vpn_private_address'])
if not FLAGS.fake_network:
self.driver.update_dhcp(context, dev, network_ref)
if(FLAGS.use_ipv6):
self.driver.update_ra(context, dev, network_ref)
gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network_ref['id'],
{'gateway_v6': gateway})
def _get_networks_by_uuids(self, context, network_uuids):
return self.db.network_get_all_by_uuids(context, network_uuids,
context.project_id)
@property
def _bottom_reserved_ips(self):
"""Number of reserved ips at the bottom of the range."""
return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server
@property
def _top_reserved_ips(self):
"""Number of reserved ips at the top of the range."""
parent_reserved = super(VlanManager, self)._top_reserved_ips
return parent_reserved + FLAGS.cnt_vpn_clients
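# NOTE: illustrative sketch only, not part of the original module. It repeats
# the size arithmetic from NetworkManager.create_networks for the VlanManager
# docstring example `nova-manage network create 10.0.0.0/8 3 16` -- three
# networks of 16 addresses become three /28 subnets carved from the front of
# 10.0.0.0/8. Relies on the module-level math and netaddr imports.
def _example_vlan_subnet_math(cidr='10.0.0.0/8', num_networks=3,
                              network_size=16):
    fixed_net = netaddr.IPNetwork(cidr)
    assert len(fixed_net) >= num_networks * network_size
    subnet_bits = int(math.ceil(math.log(network_size, 2)))  # 4 bits
    prefixlen_v4 = 32 - subnet_bits                          # /28
    return [str(s) for s in fixed_net.subnet(prefixlen_v4, count=num_networks)]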
| xushiwei/nova | nova/network/manager.py | Python | apache-2.0 | 51,086 |
from __future__ import absolute_import
import unittest
from testutils import ADMIN_CLIENT
from testutils import TEARDOWN
from library.user import User
from library.project import Project
from library.repository import Repository
from library.repository import pull_harbor_image
from library.repository import push_image_to_project
from testutils import harbor_server
from library.base import _assert_status_code
class TestProjects(unittest.TestCase):
@classmethod
def setUp(self):
project = Project()
self.project= project
user = User()
self.user= user
repo = Repository()
self.repo= repo
@classmethod
def tearDown(self):
print "Case completed"
@unittest.skipIf(TEARDOWN == False, "Test data won't be erased.")
def test_ClearData(self):
#1. Delete repository(RA) by user(UA);
self.repo.delete_repoitory(TestProjects.repo_name_in_project_a, **TestProjects.USER_RA_CLIENT)
self.repo.delete_repoitory(TestProjects.repo_name_in_project_b, **TestProjects.USER_RA_CLIENT)
self.repo.delete_repoitory(TestProjects.repo_name_in_project_c, **TestProjects.USER_RA_CLIENT)
self.repo.delete_repoitory(TestProjects.repo_name_pa, **TestProjects.USER_RA_CLIENT)
#2. Delete project(PA);
self.project.delete_project(TestProjects.project_ra_id_a, **TestProjects.USER_RA_CLIENT)
self.project.delete_project(TestProjects.project_ra_id_b, **TestProjects.USER_RA_CLIENT)
self.project.delete_project(TestProjects.project_ra_id_c, **TestProjects.USER_RA_CLIENT)
#3. Delete user(UA);
self.user.delete_user(TestProjects.user_ra_id, **ADMIN_CLIENT)
def testRobotAccount(self):
"""
Test case:
Robot Account
Test step and expected result:
1. Create user(UA);
2. Create private project(PA), private project(PB) and public project(PC) by user(UA);
3. Push image(ImagePA) to project(PA), image(ImagePB) to project(PB) and image(ImagePC) to project(PC) by user(UA);
        4. Create a new robot account(RA) with pull and push privilege in project(PA) by user(UA);
        5. Check robot account info, it should have both pull and push privileges;
6. Pull image(ImagePA) from project(PA) by robot account(RA), it must be successful;
7. Push image(ImageRA) to project(PA) by robot account(RA), it must be successful;
8. Push image(ImageRA) to project(PB) by robot account(RA), it must be not successful;
9. Pull image(ImagePB) from project(PB) by robot account(RA), it must be not successful;
10. Pull image from project(PC), it must be successful;
11. Push image(ImageRA) to project(PC) by robot account(RA), it must be not successful;
12. Update action property of robot account(RA);
13. Pull image(ImagePA) from project(PA) by robot account(RA), it must be not successful;
14. Push image(ImageRA) to project(PA) by robot account(RA), it must be not successful;
        15. Delete robot account(RA), it must be not successful;
Tear down:
1. Delete project(PA) (PB) (PC);
2. Delete user(UA).
"""
url = ADMIN_CLIENT["endpoint"]
admin_name = ADMIN_CLIENT["username"]
admin_password = ADMIN_CLIENT["password"]
user_ra_password = "Aa123456"
image_project_a = "tomcat"
image_project_b = "hello-world"
image_project_c = "mysql"
image_robot_account = "mariadb"
tag = "latest"
print "#1. Create user(UA);"
TestProjects.user_ra_id, user_ra_name = self.user.create_user(user_password = user_ra_password, **ADMIN_CLIENT)
TestProjects.USER_RA_CLIENT=dict(endpoint = url, username = user_ra_name, password = user_ra_password)
print "#2. Create private project(PA), private project(PB) and public project(PC) by user(UA);"
TestProjects.project_ra_id_a, project_ra_name_a = self.project.create_project(metadata = {"public": "false"}, **TestProjects.USER_RA_CLIENT)
TestProjects.project_ra_id_b, project_ra_name_b = self.project.create_project(metadata = {"public": "false"}, **TestProjects.USER_RA_CLIENT)
TestProjects.project_ra_id_c, project_ra_name_c = self.project.create_project(metadata = {"public": "true"}, **TestProjects.USER_RA_CLIENT)
print "#3. Push image(ImagePA) to project(PA), image(ImagePB) to project(PB) and image(ImagePC) to project(PC) by user(UA);"
TestProjects.repo_name_in_project_a, tag_a = push_image_to_project(project_ra_name_a, harbor_server, user_ra_name, user_ra_password, image_project_a, tag)
TestProjects.repo_name_in_project_b, tag_b = push_image_to_project(project_ra_name_b, harbor_server, user_ra_name, user_ra_password, image_project_b, tag)
TestProjects.repo_name_in_project_c, tag_c = push_image_to_project(project_ra_name_c, harbor_server, user_ra_name, user_ra_password, image_project_c, tag)
print "#4. Create a new robot account(RA) with pull and push privilige in project(PA) by user(UA);"
robot_id, robot_account = self.project.add_project_robot_account(TestProjects.project_ra_id_a, project_ra_name_a, **TestProjects.USER_RA_CLIENT)
print robot_account.name
print robot_account.token
print "#5. Check robot account info, it should has both pull and push priviliges;"
data = self.project.get_project_robot_account_by_id(TestProjects.project_ra_id_a, robot_id, **TestProjects.USER_RA_CLIENT)
_assert_status_code(robot_account.name, data.name)
print "#6. Pull image(ImagePA) from project(PA) by robot account(RA), it must be successful;"
pull_harbor_image(harbor_server, robot_account.name, robot_account.token, TestProjects.repo_name_in_project_a, tag_a)
print "#7. Push image(ImageRA) to project(PA) by robot account(RA), it must be successful;"
TestProjects.repo_name_pa, _ = push_image_to_project(project_ra_name_a, harbor_server, robot_account.name, robot_account.token, image_robot_account, tag)
print "#8. Push image(ImageRA) to project(PB) by robot account(RA), it must be not successful;"
push_image_to_project(project_ra_name_b, harbor_server, robot_account.name, robot_account.token, image_robot_account, tag, expected_error_message = "denied: requested access to the resource is denied")
print "#9. Pull image(ImagePB) from project(PB) by robot account(RA), it must be not successful;"
pull_harbor_image(harbor_server, robot_account.name, robot_account.token, TestProjects.repo_name_in_project_b, tag_b, expected_error_message = r"pull access denied for " + harbor_server + "/" + TestProjects.repo_name_in_project_b)
print "#10. Pull image from project(PC), it must be successful;"
pull_harbor_image(harbor_server, robot_account.name, robot_account.token, TestProjects.repo_name_in_project_c, tag_c)
print "#11. Push image(ImageRA) to project(PC) by robot account(RA), it must be not successful;"
push_image_to_project(project_ra_name_c, harbor_server, robot_account.name, robot_account.token, image_robot_account, tag, expected_error_message = "denied: requested access to the resource is denied")
print "#12. Update action property of robot account(RA);"
self.project.disable_project_robot_account(TestProjects.project_ra_id_a, robot_id, True, **TestProjects.USER_RA_CLIENT)
print "#13. Pull image(ImagePA) from project(PA) by robot account(RA), it must be not successful;"
pull_harbor_image(harbor_server, robot_account.name, robot_account.token, TestProjects.repo_name_in_project_a, tag_a, expected_login_error_message = "401 Client Error: Unauthorized")
print "#14. Push image(ImageRA) to project(PA) by robot account(RA), it must be not successful;"
push_image_to_project(project_ra_name_a, harbor_server, robot_account.name, robot_account.token, image_robot_account, tag, expected_login_error_message = "401 Client Error: Unauthorized")
print "#15. Delete robot account(RA), it must be not successful;"
self.project.delete_project_robot_account(TestProjects.project_ra_id_a, robot_id, **TestProjects.USER_RA_CLIENT)
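# NOTE: minimal illustrative helper, not used by the test above. It assumes
# the repository names returned by push_image_to_project follow the
# "<project>/<image>" form, so a full pull/push reference looks like
# "<harbor_server>/<project>/<image>:<tag>", as in the expected error message
# of step 9.
def build_image_reference(harbor_server, project_name, image, tag):
    repo_name = "%s/%s" % (project_name, image)
    return "%s/%s:%s" % (harbor_server, repo_name, tag)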
if __name__ == '__main__':
    unittest.main()
| steven-zou/harbor | tests/apitests/python/test_robot_account.py | Python | apache-2.0 | 8278
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from keystone import auth
from keystone.auth.plugins import base
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit.ksfixtures import auth_plugins
# for testing purposes only
METHOD_NAME = 'simple_challenge_response'
METHOD_OPTS = {
METHOD_NAME:
'keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse',
}
EXPECTED_RESPONSE = uuid.uuid4().hex
DEMO_USER_ID = uuid.uuid4().hex
class SimpleChallengeResponse(base.AuthMethodHandler):
def authenticate(self, context, auth_payload, user_context):
if 'response' in auth_payload:
if auth_payload['response'] != EXPECTED_RESPONSE:
raise exception.Unauthorized('Wrong answer')
user_context['user_id'] = DEMO_USER_ID
else:
return {"challenge": "What's the name of your high school?"}
class TestAuthPlugin(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
super(TestAuthPlugin, self).setUp()
self.api = auth.controllers.Auth()
def test_unsupported_auth_method(self):
method_name = uuid.uuid4().hex
auth_data = {'methods': [method_name]}
auth_data[method_name] = {'test': 'test'}
auth_data = {'identity': auth_data}
self.assertRaises(exception.AuthMethodNotSupported,
auth.controllers.AuthInfo.create,
auth_data)
def test_addition_auth_steps(self):
self.useFixture(
auth_plugins.ConfigAuthPlugins(self.config_fixture,
methods=[METHOD_NAME],
**METHOD_OPTS))
self.useFixture(auth_plugins.LoadAuthPlugins(METHOD_NAME))
auth_data = {'methods': [METHOD_NAME]}
auth_data[METHOD_NAME] = {
'test': 'test'}
auth_data = {'identity': auth_data}
auth_info = auth.controllers.AuthInfo.create(auth_data)
auth_context = {'extras': {}, 'method_names': []}
try:
self.api.authenticate(self.make_request(), auth_info, auth_context)
except exception.AdditionalAuthRequired as e:
self.assertIn('methods', e.authentication)
self.assertIn(METHOD_NAME, e.authentication['methods'])
self.assertIn(METHOD_NAME, e.authentication)
self.assertIn('challenge', e.authentication[METHOD_NAME])
# test correct response
auth_data = {'methods': [METHOD_NAME]}
auth_data[METHOD_NAME] = {
'response': EXPECTED_RESPONSE}
auth_data = {'identity': auth_data}
auth_info = auth.controllers.AuthInfo.create(auth_data)
auth_context = {'extras': {}, 'method_names': []}
self.api.authenticate(self.make_request(), auth_info, auth_context)
self.assertEqual(DEMO_USER_ID, auth_context['user_id'])
# test incorrect response
auth_data = {'methods': [METHOD_NAME]}
auth_data[METHOD_NAME] = {
'response': uuid.uuid4().hex}
auth_data = {'identity': auth_data}
auth_info = auth.controllers.AuthInfo.create(auth_data)
auth_context = {'extras': {}, 'method_names': []}
self.assertRaises(exception.Unauthorized,
self.api.authenticate,
self.make_request(),
auth_info,
auth_context)
def test_duplicate_method(self):
# Having the same method twice doesn't cause load_auth_methods to fail.
self.useFixture(
auth_plugins.ConfigAuthPlugins(self.config_fixture,
['external', 'external']))
auth.controllers.load_auth_methods()
self.assertIn('external', auth.controllers.AUTH_METHODS)
class TestAuthPluginDynamicOptions(TestAuthPlugin):
def config_overrides(self):
super(TestAuthPluginDynamicOptions, self).config_overrides()
# Clear the override for the [auth] ``methods`` option so it is
# possible to load the options from the config file.
self.config_fixture.conf.clear_override('methods', group='auth')
def config_files(self):
config_files = super(TestAuthPluginDynamicOptions, self).config_files()
config_files.append(unit.dirs.tests_conf('test_auth_plugin.conf'))
return config_files
class TestMapped(unit.TestCase):
def setUp(self):
super(TestMapped, self).setUp()
self.api = auth.controllers.Auth()
def config_files(self):
config_files = super(TestMapped, self).config_files()
config_files.append(unit.dirs.tests_conf('test_auth_plugin.conf'))
return config_files
def _test_mapped_invocation_with_method_name(self, method_name):
with mock.patch.object(auth.plugins.mapped.Mapped,
'authenticate',
return_value=None) as authenticate:
request = self.make_request()
auth_data = {
'identity': {
'methods': [method_name],
method_name: {'protocol': method_name},
}
}
auth_info = auth.controllers.AuthInfo.create(auth_data)
auth_context = {'extras': {},
'method_names': [],
'user_id': uuid.uuid4().hex}
self.api.authenticate(request, auth_info, auth_context)
# make sure Mapped plugin got invoked with the correct payload
((context, auth_payload, auth_context),
kwargs) = authenticate.call_args
self.assertEqual(method_name, auth_payload['protocol'])
def test_mapped_with_remote_user(self):
method_name = 'saml2'
auth_data = {'methods': [method_name]}
        # put the method name in the payload so it's easier to correlate
# method name with payload
auth_data[method_name] = {'protocol': method_name}
auth_data = {'identity': auth_data}
auth_context = {'extras': {},
'method_names': [],
'user_id': uuid.uuid4().hex}
self.useFixture(auth_plugins.LoadAuthPlugins(method_name))
with mock.patch.object(auth.plugins.mapped.Mapped,
'authenticate',
return_value=None) as authenticate:
auth_info = auth.controllers.AuthInfo.create(auth_data)
request = self.make_request(environ={'REMOTE_USER': '[email protected]'})
self.api.authenticate(request, auth_info, auth_context)
# make sure Mapped plugin got invoked with the correct payload
((context, auth_payload, auth_context),
kwargs) = authenticate.call_args
self.assertEqual(method_name, auth_payload['protocol'])
def test_supporting_multiple_methods(self):
method_names = ('saml2', 'openid', 'x509')
self.useFixture(auth_plugins.LoadAuthPlugins(*method_names))
for method_name in method_names:
self._test_mapped_invocation_with_method_name(method_name)
| cernops/keystone | keystone/tests/unit/test_auth_plugin.py | Python | apache-2.0 | 7,772 |
import pandas as pd
import numpy as np
import os
from datetime import datetime
import numbers
import requests
import re
from sklearn.linear_model import LinearRegression
from statsmodels.api import OLS
import cPickle
from sklearn.cluster import DBSCAN
import requests
import json
from matplotlib.path import Path
from matplotlib import colors, cm
class BaltimoreData():
def __init__(self, *args):
self.DATA_PATH = os.path.join(os.path.dirname(__file__), "data/baltimore/")
self.NEIGHBORHOOD_URL = "http://catalog.civicdashboards.com/dataset/e90d8498-44dd-4390-9bb9-5a53e85221eb/resource/6045d7d0-263e-416c-80fe-af1fb9f30650/download/3327ba9ba6f54cfdb9a5ef18244ae710temp.geojson"
self.CSV_FILE = self.DATA_PATH + "Baltimore_Complaint_Data.csv"
self.df = pd.DataFrame()
self.meta = dict()
self.args = args
def filter_df(self, df):
for arg in self.args:
assert len(arg)==2, "Filter must define field and filter values"
assert arg[0] in df.columns
key = arg[0]
val = self._set_list(arg[1])
df = df[df[key].isin(val)].reset_index(drop=True)
return df
def initData(self, **kwargs):
if 'download_data' in kwargs:
if kwargs['download_data']:
self.pull_data()
if 'download_metadata' in kwargs:
if kwargs['download_metadata']:
self.pull_metadata()
if 'limit' in kwargs:
if kwargs['limit']:
limit = kwargs['limit']
else:
limit = None
if 'repull' in kwargs:
if kwargs['repull']:
self.read_data(limit=limit)
self._apply_weapons_flag()
self.read_meta()
self.df['CITY'] = 'Baltimore'
return self
def _split_latlng(self):
Lat_Lng = self.df['Location'].str.replace('\(|\)', '').str.split(', ')
self.df['Latitude'] = Lat_Lng.map(lambda x: x[0])
self.df['Longitude'] = Lat_Lng.map(lambda x: x[1])
return self
def read_data(self, limit=None):
self.df = pd.read_csv(self.CSV_FILE, nrows=limit)
self.df.rename(columns={'Location': 'Address', 'CrimeDate': 'Date', 'Inside/Outside': 'Location Description', 'Location 1': 'Location', 'District': 'DIST_NUM', 'Description': 'Primary Type', 'Weapon': 'Description'}, inplace=True)
self.df = self.df[self.df.Location.notnull()].reset_index(drop=True)
self._split_latlng()
self.df['Location Description'] = self.df['Location Description'].apply(self.location_descriptions)
return self
@staticmethod
def location_descriptions(x):
if x=="I":
return "Inside"
elif x=="O":
return "Outside"
else:
return x
def read_meta(self):
self.meta['census'] = self._read_census()
self.meta['community'] = self._read_community()
self.meta['ward'] = self._read_ward()
self.meta['neighborhood'] = self._read_neighborhood()
def _read_census(self):
demo_census = self._read_demo_census()
        housing_census = self._read_housing_census()
family_census = self._read_family_census()
crime_census = self._read_crime_census()
workforce_census = self._read_workforce_census()
arts_census = self._read_arts_census()
education_census = self._read_education_census()
sustainability_census = self._read_sustainability_census()
census = demo_census
census = census.merge(housing_census, on='COMMUNITY AREA NAME')
census = census.merge(family_census, on='COMMUNITY AREA NAME')
census = census.merge(housing_census, on='COMMUNITY AREA NAME')
census = census.merge(crime_census, on='COMMUNITY AREA NAME')
census = census.merge(workforce_census, on='COMMUNITY AREA NAME')
census = census.merge(arts_census, on='COMMUNITY AREA NAME')
census = census.merge(education_census, on='COMMUNITY AREA NAME')
census = census.merge(sustainability_census, on='COMMUNITY AREA NAME')
return census [[c for c in census.columns if c[-2:] not in ('_x', '_y')]]
def _read_community(self):
community = pd.read_csv(self.DATA_PATH + 'BNIA_neighborhood.csv').rename(columns={'CSA2010': 'Community Area', 'the_geom': 'the_geom_community'})
community['COMMUNITY'] = community['Community Area'].str.upper()
return community
def _read_neighborhood(self):
neighborhood = pd.read_csv(self.DATA_PATH + 'neighborhood.csv').rename(columns={'NBRDESC': 'NEIGHBORHOOD', 'LABEL': 'Neighborhood', 'the_geom': 'the_geom_neighborhood'})
return neighborhood
def _read_ward(self):
ward = pd.read_csv(self.DATA_PATH + 'ward.csv').rename(columns={'NAME_1': 'Ward'})
return ward
def _read_demo_census(self):
census_demo = pd.read_excel(self.DATA_PATH + 'BNIA_demo_data.csv', header=1).rename(columns={'CSA2010': 'COMMUNITY AREA NAME'})
return census_demo
def _read_housing_census(self):
census_action = pd.read_excel(self.DATA_PATH + 'BNIA_housing_data.csv', header=1).rename(columns={'CSA2010': 'COMMUNITY AREA NAME'})
return census_action
def _read_family_census(self):
census_action = pd.read_excel(self.DATA_PATH + 'BNIA_family_data.csv', header=1).rename(columns={'CSA2010': 'COMMUNITY AREA NAME'})
return census_action
def _read_crime_census(self):
census_action = pd.read_excel(self.DATA_PATH + 'BNIA_crime_data.csv', header=1).rename(columns={'CSA2010': 'COMMUNITY AREA NAME'})
return census_action
def _read_workforce_census(self):
census_action = pd.read_excel(self.DATA_PATH + 'BNIA_workforce_data.csv', header=1).rename(columns={'CSA2010': 'COMMUNITY AREA NAME'})
return census_action
def _read_arts_census(self):
census_action = pd.read_excel(self.DATA_PATH + 'BNIA_arts_data.csv', header=1).rename(columns={'CSA2010': 'COMMUNITY AREA NAME'})
return census_action
def _read_education_census(self):
census_action = pd.read_excel(self.DATA_PATH + 'BNIA_education_data.csv', header=1).rename(columns={'CSA2010': 'COMMUNITY AREA NAME'})
return census_action
def _read_sustainability_census(self):
census_action = pd.read_excel(self.DATA_PATH + 'BNIA_sustainability_data.csv', header=1).rename(columns={'CSA2010': 'COMMUNITY AREA NAME'})
return census_action
def pull_data(self):
os.system("curl 'https://data.baltimorecity.gov/api/views/v9wg-c9g7/rows.csv?accessType=DOWNLOAD' -o '%sBaltimore_Complaint_Data.csv'" % self.DATA_PATH)
return self
def pull_metadata(self):
os.system("curl 'http://bniajfi.org/wp-content/uploads/2016/04/VS-14-Census-2010-2014.xlsx' -o '%sBNIA_demo_data.csv'" % self.DATA_PATH)
os.system("curl 'http://bniajfi.org/wp-content/uploads/2016/04/VS-14-Housing-2010-2014.xlsx' -o '%sBNIA_housing_data.csv'" % self.DATA_PATH)
os.system("curl 'http://bniajfi.org/wp-content/uploads/2016/04/VS-14-Children-and-Family-Health-2010-2014.xlsx' -o '%sBNIA_family_data.csv'" % self.DATA_PATH)
os.system("curl 'http://bniajfi.org/wp-content/uploads/2016/04/VS14-Crime-2010-2014.xlsx' -o '%sBNIA_crime_data.csv'" % self.DATA_PATH)
os.system("curl 'http://bniajfi.org/wp-content/uploads/2016/04/VS-14-Workforce-2010-2014.xlsx' -o '%sBNIA_workforce_data.csv'" % self.DATA_PATH)
os.system("curl 'http://bniajfi.org/wp-content/uploads/2016/04/VS-14-Arts-2011-2014.xlsx' -o '%sBNIA_arts_data.csv'" % self.DATA_PATH)
os.system("curl 'http://bniajfi.org/wp-content/uploads/2016/04/VS-14-Education-2010-2014.xlsx' -o '%sBNIA_education_data.csv'" % self.DATA_PATH)
os.system("curl 'http://bniajfi.org/wp-content/uploads/2016/04/VS-14-Sustainability-2010-2014.xlsx' -o '%sBNIA_sustainability_data.csv'" % self.DATA_PATH)
os.system("curl 'https://data.baltimorecity.gov/api/views/5j2q-jsy4/rows.csv?accessType=DOWNLOAD' -o '%sward.csv'" % self.DATA_PATH)
os.system("curl 'https://data.baltimorecity.gov/api/views/i49u-94ea/rows.csv?accessType=DOWNLOAD' -o '%sBNIA_neighborhood.csv'" % self.DATA_PATH)
os.system("curl 'https://data.baltimorecity.gov/api/views/h3fx-54q3/rows.csv?accessType=DOWNLOAD' -o '%sneighborhood.csv'" % self.DATA_PATH)
return self
def get_community_name(self, df):
df = self._get_area_name(df, 'community', 'Community Area')
df['Community Area Number'] = df['Community Area']
return df
def get_ward_name(self, df):
df = self._get_area_name(df, 'ward', 'Ward')
return df
def get_district_name(self, df):
df = self._get_area_name(df, 'district', 'DIST_NUM')
return df
def _get_area_name(self, df, meta_key, col):
area_data = self.meta[meta_key].copy()
area_data = self.geom_to_list(area_data)
for c in area_data.columns:
if re.match('the_geom.*', c):
self.meta[meta_key]['path'] = area_data[c].map(lambda x: Path(x))
df[col] = df.index.map(lambda x: self._match_neighborhood(x, df, meta_key, col))
df[col] = df[col].map(lambda x: x[0] if len(x)>0 else np.nan)
df = df.merge(self.meta[meta_key], how='left', on=col, suffixes=('_%s' % meta_key, ''))
df.rename(columns={'the_geom': 'the_geom_%s' % meta_key}, inplace=True)
return df[df[col].notnull()]
def _match_neighborhood(self, x, df, meta_key, col):
lat = float(df.ix[x]['Latitude'])
lng = float(df.ix[x]['Longitude'])
area_data = self.meta[meta_key].copy()
if meta_key=='community':
area_data['use_flag'] = area_data['COMMUNITY'].map(lambda x: 1 if not re.match('park-cemetery-etc.*|airport', x.lower()) else 0)
area_data = area_data[area_data.use_flag==1]
return [row[col] for i, row in area_data.iterrows() if row['path'].contains_point([lat, lng])]
def read_census_extended(self, values=None):
census_extended = self._read_census()
census_extended['COMMUNITY AREA NAME'] = census_extended['COMMUNITY AREA NAME'].map(lambda x: x.upper())
return census_extended
@classmethod
def geom_to_list(cls, df):
for c in df.columns:
if re.match('the_geom.*', c):
df[c] = df[c].map(lambda x: cls._parse_geom(x))
return df
@staticmethod
def _parse_geom(coords):
if isinstance(coords, basestring):
if str(coords) != '0':
coord_sets = re.match("MULTIPOLYGON \(\(\((.*)\)\)\)", coords).group(1)
coord_strings = [re.sub("\(|\)", "", c).split(" ") for c in coord_sets.split(", ")]
coord_list = tuple([(float(c[1]), float(c[0])) for c in coord_strings])
else:
coord_list = tuple([])
elif isinstance(coords, (list, tuple)):
coord_list = tuple(coords)
return coord_list
def communities(self, df):
community = dict()
census = self._read_census()
if set(['the_geom_community', 'Community Area']) < set(df.columns):
for index1, row1 in df.iterrows():
for index2, row2 in df.iterrows():
community.setdefault(row1['Community Area'], {})
community.setdefault(row2['Community Area'], {})
if index1 > index2:
geom1 = row1['the_geom_community']
geom2 = row2['the_geom_community']
boundary_intersect = set(geom1) & set(geom2)
if len(boundary_intersect) > 0:
community[row1['Community Area']].setdefault('adj_list', []).append(row2['Community Area'])
community[row2['Community Area']].setdefault('adj_list', []).append(row1['Community Area'])
community = pd.DataFrame(community).T
numeric_cols = census.columns.difference(['COMMUNITY AREA NAME'])
census[numeric_cols] = census[numeric_cols].fillna(0).applymap(lambda x: self._parse_pct(x))
census.index = census['COMMUNITY AREA NAME']
return pd.DataFrame(community).join(census).fillna(-1)
@staticmethod
def _parse_pct(x):
if isinstance(x, basestring):
x = re.match('.*(\d+).*', x)
if x:
if x[-1]=='%':
return float(x.group(1))/100.
else:
return float(x.group(1))
else:
return 0
else:
return float(x)
@staticmethod
def _set_list(f):
if not isinstance(f, list):
if isinstance(f, (basestring, numbers.Integral)):
return [f]
else:
return list(f)
else:
return f
def _model(self, X, y):
model = OLS(y, X)
result = model.fit()
print result.summary()
return result
def _apply_weapons_flag(self):
indexes = []
self.df['WEAPON_FLAG'] = 0
for i, row in self.df.iterrows():
if row['Description']:
if 'FIREARM' in str(row['Description']) or 'FIREARM' in str(row['Primary Type']):
indexes.append(i)
self.df.loc[indexes, 'WEAPON_FLAG'] = 1
return self
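# NOTE: illustrative sketch only, not used elsewhere in this module. It shows
# the WKT-style geometry strings BaltimoreData._parse_geom expects and the
# (lat, lng) tuples it produces; the coordinates are made-up placeholders.
def _example_parse_geom():
    wkt = ('MULTIPOLYGON (((-76.61 39.28, -76.60 39.28, '
           '-76.60 39.29, -76.61 39.28)))')
    # each "lng lat" pair in the string comes back swapped as (lat, lng)
    return BaltimoreData._parse_geom(wkt)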
class PivotData(BaltimoreData):
def __init__(self, fields, dt_format, *args, **kwargs):
BaltimoreData.__init__(self, *args)
kwargs.setdefault('repull', False)
self.fields = self._set_list(fields)
self.dt_format = dt_format
if 'csv' in kwargs:
self.csv = self.DATA_PATH + kwargs['csv']
else:
self.csv = ""
if not kwargs['repull'] and os.path.isfile(self.csv):
self.initData(**kwargs)
self._data = pd.read_csv(self.csv)
else:
self.initData(**kwargs)
self.pivot()
def pivot(self):
data = self.df.copy()
data['Year'] = data['Date'].map(lambda x: datetime.strptime(x, '%m/%d/%Y').year)
data = self.filter_df(data)
if ('COMMUNITY' in self.fields) or ('Community Area' in self.fields) or ('Community Area Number' in self.fields):
data = self.get_community_name(data)
if 'Ward' in self.fields:
data = self.get_ward_name(data)
sep = '---'
data['Period'] = data['Date'].map(lambda x: datetime.strptime(x, '%m/%d/%Y').strftime(self.dt_format))
counts = data.fillna(0).groupby(['Period']+self.fields, as_index=False).count()
counts = counts.iloc[:, 0:len(self.fields)+2]
counts.columns = ['Period']+self.fields+['count']
for i, f in enumerate(self.fields):
field_counts = counts[f].map(lambda x: str(x))
if i==0:
counts['fields'] = field_counts
else:
counts['fields'] += sep+field_counts
pivot = counts.pivot('fields', 'Period', 'count')
pivot_split = pivot.reset_index().fields.str.split(sep, expand=True)
pivot_rename = pivot_split.rename(columns={int(k): v for k, v in enumerate(self.fields)})
self._data = pivot_rename.merge(pivot.reset_index(drop=True), left_index=True, right_index=True)
if self.csv:
self._data.to_csv(self.csv, index=False)
return self
def _date_cols(self):
return set(self._data.columns) - set(self.fields)
def norm_data(self, dt_filter, filter_zero=True):
data = self.data.copy()
data.loc[:, self.date_list] = data.loc[:, self.date_list].fillna(0)
norm = np.linalg.norm(data.loc[:, self.date_list].fillna(0))
data.loc[:, 'fill_opacity'] = data[dt_filter]/norm
data.loc[:, 'fill_opacity'] = data.loc[:, 'fill_opacity'] / max(data.loc[:, 'fill_opacity'] )
if filter_zero:
data = data[data[dt_filter]>0].reset_index(drop=True)
return data
def color_data(self, dt_filter, filter_zero=True):
h = cm.get_cmap('RdYlGn')
data = self.norm_data(dt_filter, filter_zero)
data.loc[:, 'fill_color'] = data.loc[:, 'fill_opacity'].map(lambda x: colors.rgb2hex(h(1.0-x)).upper())
return data
@property
def data(self):
return self._data
@property
def date_list(self):
dt_list = list(self._date_cols())
dt_list.sort()
return dt_list
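# NOTE: illustrative sketch only, not part of the pipeline above. It mirrors,
# on a tiny hand-made frame, the Period bucketing and pivot that
# PivotData.pivot performs on the Baltimore data: rows become field
# combinations, columns become '%Y-%m' periods, values become incident
# counts. Relies on the pandas/datetime imports at the top of this module.
def _example_period_pivot():
    df = pd.DataFrame({'Date': ['01/15/2014', '01/20/2014', '02/03/2014'],
                       'Primary Type': ['HOMICIDE', 'SHOOTING', 'HOMICIDE']})
    df['Period'] = df['Date'].map(
        lambda x: datetime.strptime(x, '%m/%d/%Y').strftime('%Y-%m'))
    counts = df.groupby(['Period', 'Primary Type']).size().reset_index(name='count')
    return counts.pivot('Primary Type', 'Period', 'count')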
if __name__=="__main__":
# csv = 'community_pivot.csv'
# fields = ['Community Area', 'COMMUNITY', 'the_geom_community']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
# csv = 'ward_marker.csv'
# fields = ['Latitude', 'Longitude', 'Ward', 'Primary Type']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
# csv = 'community_marker.csv'
# fields = ['Latitude', 'Longitude', 'Community Area', 'Primary Type']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
# csv = 'incident_marker.csv'
# fields = ['Latitude', 'Longitude', 'Location', 'Primary Type']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
# csv = 'heatmap.csv'
# fields = ['Latitude', 'Longitude']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
# csv = 'census_correlation.csv'
# fields = ['Community Area', 'COMMUNITY', 'the_geom_community']
# p = PivotData(fields, '%Y', ['WEAPON_FLAG', 1], ['Year', [2010, 2011, 2012, 2013, 2014]], csv=csv, repull=True)
# print '%s done' % csv
# csv = 'trends.csv'
# fields = ['CITY']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
csv = 'crime_location.csv'
fields = ['Primary Type', 'Location Description']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
print '%s done' % csv
# csv = 'district_marker.csv'
# fields = ['Latitude', 'Longitude', 'DIST_NUM', 'Primary Type']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
# csv = 'city_marker.csv'
# fields = ['Latitude', 'Longitude', 'CITY', 'Primary Type']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
# csv = 'crime_description.csv'
# fields = ['Primary Type', 'Description']
# p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
# print '%s done' % csv
| afenichel/ENGI4800-CAPSTONE | gunviolence/BaltimoreData.py | Python | apache-2.0 | 16,690 |
DEBUG = True
SECRET_KEY = 'this is a not very secret key'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'rdmo',
'USER': 'root',
'PASSWORD': '',
'TEST': {
'CHARSET': 'utf8',
'COLLATION': 'utf8_general_ci',
},
'OPTIONS': {
'init_command': "SET GLOBAL sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));"
}
}
}
| DMPwerkzeug/DMPwerkzeug | testing/config/settings/mysql.py | Python | apache-2.0 | 456 |
import json
from django.http import JsonResponse
from .lib.scheduler import sch
from .lib.sun import *
from .lib.context import Context
from .lib.execution_element import ExecutionElement
def get_data():
return {
'ExecutionElements': json.loads(str(sch.context)),
'Now': Sun.Now().strftime('%H:%M:%S'),
'IsDay': sch.IsDay,
'Dawn': Sun.Dawn().strftime('%H:%M:%S'),
'Sunrise': Sun.Sunrise().strftime('%H:%M:%S'),
'Noon': Sun.Noon().strftime('%H:%M:%S'),
'Sunset': Sun.Sunset().strftime('%H:%M:%S'),
'Dusk' : Sun.Dusk().strftime('%H:%M:%S')
}
def status(request):
return JsonResponse(get_data(), safe=False)
def isday(request):
return JsonResponse(sch.IsDay, safe=False)
def set_execution_element(request, id, value):
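    # Id 0 is treated as a broadcast: override every execution element instead of a single one.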
Id = int(id)
if Id == 0:
set_all_execution_elements(request, value)
else:
Value = int(value)
sch.Context[Id].Overriden = True
sch.Context[Id].OverridenValue = Value
return JsonResponse(get_data(), safe=False)
def set_execution_element_auto(request, id):
Id = int(id)
if Id == 0:
set_all_execution_elements_auto(request)
else:
sch.Context[Id].Overriden = False
return JsonResponse(get_data(), safe=False)
def set_all_execution_elements(request, value):
Value = int(value)
for executionElement in sch.Context.ExecutionElements:
executionElement.Overriden = True
executionElement.OverridenValue = Value
def set_all_execution_elements_auto(request):
for executionElement in sch.Context.ExecutionElements:
executionElement.Overriden = False | marians20/Aquarium | automation/ajax.py | Python | apache-2.0 | 1,657 |
JANOME_VERSION='0.3.9'
| nakagami/janome | janome/version.py | Python | apache-2.0 | 23 |
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from testrunner.local import commands
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
class FuzzNativesTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(FuzzNativesTestSuite, self).__init__(name, root)
def ListTests(self, context):
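    # Run the shell with --allow-natives-syntax and %ListNatives() to enumerate the
    # runtime natives, then generate one test case per native function.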
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
if utils.IsWindows():
shell += ".exe"
output = commands.Execute(
context.command_prefix +
[shell, "--allow-natives-syntax", "-e",
"try { var natives = %ListNatives();"
" for (var n in natives) { print(natives[n]); }"
"} catch(e) {}"] +
context.extra_flags)
if output.exit_code != 0:
print output.stdout
print output.stderr
assert False, "Failed to get natives list."
tests = []
for line in output.stdout.strip().split():
(name, argc) = line.split(",")
flags = ["--allow-natives-syntax",
"-e", "var NAME = '%s', ARGC = %s;" % (name, argc)]
test = testcase.TestCase(self, name, flags)
tests.append(test)
return tests
def GetFlagsForTestCase(self, testcase, context):
name = testcase.path
basefile = os.path.join(self.root, "base.js")
return testcase.flags + [basefile] + context.mode_flags
def GetSuite(name, root):
return FuzzNativesTestSuite(name, root)
| nextsmsversion/macchina.io | platform/JS/V8/v8-3.28.4/test/fuzz-natives/testcfg.py | Python | apache-2.0 | 1,591 |
from testlink.testlinkerrors import TLResponseError
class TestReporter(dict):
def __init__(self, tls, testcases, *args, **kwargs):
"""This can be given one or more testcases, but they all must have the same project, plan, and platform."""
super(TestReporter, self).__init__(*args, **kwargs)
self.tls = tls
# handle single testcase
self.testcases = testcases if isinstance(testcases, list) else [testcases]
self._plan_testcases = None
self.remove_non_report_kwargs()
self._platformname_generated = False
def remove_non_report_kwargs(self):
self.buildname = self.pop('buildname')
self.buildnotes = self.pop('buildnotes', "Created with automation.")
def setup_testlink(self):
"""Call properties that may set report kwarg values."""
self.testprojectname
self.testprojectid
self.testplanid
self.testplanname
self.platformname
self.platformid
self.buildid
def _get_project_name_by_id(self):
if self.testprojectid:
for project in self.tls.getProjects():
if project['id'] == self.testprojectid:
return project['name']
def _projectname_getter(self):
if not self.get('testprojectname') and self.testprojectid:
self['testprojectname'] = self._get_project_name_by_id()
return self.get('testprojectname')
@property
def testprojectname(self):
return self._projectname_getter()
def _get_project_id(self):
tpid = self.get('testprojectid')
if not tpid and self.testprojectname:
self['testprojectid'] = self.tls.getProjectIDByName(self['testprojectname'])
return self['testprojectid']
return tpid
def _get_project_id_or_none(self):
project_id = self._get_project_id()
# If not found the id will return as -1
if project_id == -1:
project_id = None
return project_id
@property
def testprojectid(self):
self['testprojectid'] = self._get_project_id_or_none()
return self.get('testprojectid')
@property
def testplanid(self):
return self.get('testplanid')
@property
def testplanname(self):
return self.get('testplanname')
@property
def platformname(self):
"""Return a platformname added to the testplan if there is one."""
return self.get('platformname')
@property
def platformid(self):
return self.get('platformid')
@property
def buildid(self):
return self.get('buildid')
@property
def plan_tcids(self):
if not self._plan_testcases:
self._plan_testcases = set()
tc_dict = self.tls.getTestCasesForTestPlan(self.testplanid)
try:
for _, platform in tc_dict.items():
for k, v in platform.items():
self._plan_testcases.add(v['full_external_id'])
except AttributeError:
# getTestCasesForTestPlan returns an empty list instead of an empty dict
pass
return self._plan_testcases
def reportgen(self):
"""For use if you need to look at the status returns of individual reporting."""
self.setup_testlink()
for testcase in self.testcases:
yield self.tls.reportTCResult(testcaseexternalid=testcase, **self)
def report(self):
for _ in self.reportgen():
pass
class AddTestReporter(TestReporter):
"""Add testcase to testplan if not added."""
def setup_testlink(self):
super(AddTestReporter, self).setup_testlink()
self.ensure_testcases_in_plan()
def ensure_testcases_in_plan(self):
# Get the platformid if possible or else addition will fail
self.platformid
for testcase in self.testcases:
            # Don't skip cases already in plan_tcids: a case can be in the plan but linked to a different platform.
try:
self.tls.addTestCaseToTestPlan(
self.testprojectid, self.testplanid, testcase, self.get_latest_tc_version(testcase),
platformid=self.platformid
)
except TLResponseError as e:
# Test Case version is already linked to Test Plan
if e.code == 3045:
pass
else:
raise
def get_latest_tc_version(self, testcaseexternalid):
return int(self.tls.getTestCase(None, testcaseexternalid=testcaseexternalid)[0]['version'])
class AddTestPlanReporter(TestReporter):
@property
def testplanid(self):
if not self.get('testplanid'):
try:
self['testplanid'] = self.tls.getTestPlanByName(self.testprojectname, self.testplanname)[0]['id']
except TLResponseError as e:
# Name does not exist
if e.code == 3033:
self['testplanid'] = self.generate_testplanid()
else:
raise
except TypeError:
self['testplanid'] = self.generate_testplanid()
return self['testplanid']
def generate_testplanid(self):
"""This won't necessarily be able to create a testplanid. It requires a planname and projectname."""
if 'testplanname' not in self:
raise RuntimeError("Need testplanname to generate a testplan for results.")
tp = self.tls.createTestPlan(self['testplanname'], self.testprojectname)
self['testplanid'] = tp[0]['id']
return self['testplanid']
class AddPlatformReporter(TestReporter):
@property
def platformname(self):
"""Return a platformname added to the testplan if there is one."""
pn_kwarg = self.get('platformname')
if pn_kwarg and self._platformname_generated is False:
            # If we created the platform first and swallowed the "already exists" error (12000),
            # Testlink would sometimes end up with a duplicate platform name.
try:
self.tls.addPlatformToTestPlan(self.testplanid, pn_kwarg)
except TLResponseError as e:
if int(e.code) == 235:
self.tls.createPlatform(self.testprojectname, pn_kwarg)
self.tls.addPlatformToTestPlan(self.testplanid, pn_kwarg)
else:
raise
self._platformname_generated = True
return pn_kwarg
@property
def platformid(self):
if not self.get('platformid'):
self['platformid'] = self.getPlatformID(self.platformname)
# This action is idempotent
self.tls.addPlatformToTestPlan(self.testplanid, self.platformname)
return self['platformid']
def getPlatformID(self, platformname, _firstrun=True):
"""
        Look up a platform's id by name. Creation, when needed, goes through the platformname property and always uses self.platformname.
"""
platforms = self.tls.getTestPlanPlatforms(self.testplanid)
for platform in platforms:
if platform['name'] == platformname:
return platform['id']
        # The platformname property handles platform creation, since a platform can't be created without a name.
if not self.platformname:
raise RuntimeError(
"Couldn't find platformid for {}.{}, "
"please provide a platformname to generate.".format(self.testplanid, platformname)
)
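        # Reading self.platformname above may have just created and linked the platform
        # (see AddPlatformReporter), so retry the lookup once before giving up.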
if _firstrun is True:
return self.getPlatformID(self.platformname, _firstrun=False)
else:
raise RuntimeError("PlatformID not found after generated from platformname '{}' "
"in test plan {}.".format(self.platformname, self.testplanid))
class AddBuildReporter(TestReporter):
@property
def buildid(self):
bid = self.get('buildid')
if not bid or bid not in self.tls.getBuildsForTestPlan(self.testplanid):
self['buildid'] = self._generate_buildid()
return self.get('buildid')
def _generate_buildid(self):
r = self.tls.createBuild(self.testplanid, self.buildname, self.buildnotes)
return r[0]['id']
class TestGenReporter(AddTestReporter, AddBuildReporter, AddTestPlanReporter, AddPlatformReporter, TestReporter):
"""This is the default generate everything it can version of test reporting.
If you don't want to generate one of these values you can 'roll your own' version of this class with only the
needed features that you want to generate.
For example if you wanted to add platforms and/or tests to testplans, but didn't want to ever make a new testplan
you could use a class like:
`type('MyOrgTestGenReporter', (AddTestReporter, AddPlatformReporter, TestReporter), {})`
Example usage with fake testlink server test and a manual project.
```
tls = testlink.TestLinkHelper('https://testlink.corp.com/testlink/lib/api/xmlrpc/v1/xmlrpc.php',
'devkeyabc123').connect(testlink.TestlinkAPIClient)
tgr = TestGenReporter(tls, ['TEST-123'], testprojectname='MANUALLY_MADE_PROJECT', testplanname='generated',
platformname='gend', buildname='8.fake', status='p')
```
"""
| orenault/TestLink-API-Python-client | src/testlink/testreporter.py | Python | apache-2.0 | 9,333 |
# Copyright (c) 2015 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from sahara.plugins.cdh.v5_11_0 import edp_engine
from sahara.plugins.cdh.v5_11_0 import versionhandler
from sahara.tests.unit import base
class VersionHandlerTestCase(base.SaharaTestCase):
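    # Dotted import paths used to build mock.patch targets throughout these tests.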
plugin_path = "sahara.plugins.cdh.v5_11_0."
cloudera_utils_path = plugin_path + "cloudera_utils.ClouderaUtilsV5110."
plugin_utils_path = plugin_path + "plugin_utils.PluginUtilsV5110."
def setUp(self):
super(VersionHandlerTestCase, self).setUp()
self.vh = versionhandler.VersionHandler()
def test_get_node_processes(self):
processes = self.vh.get_node_processes()
for k, v in six.iteritems(processes):
for p in v:
self.assertIsInstance(p, str)
@mock.patch("sahara.conductor.API.cluster_update")
@mock.patch("sahara.context.ctx")
@mock.patch(plugin_path + "deploy.configure_cluster")
@mock.patch(cloudera_utils_path + "get_cloudera_manager_info",
return_value={"fake_cm_info": "fake"})
def test_config_cluster(self, get_cm_info, configure_cluster,
ctx, cluster_update):
cluster = mock.Mock()
self.vh.configure_cluster(cluster)
configure_cluster.assert_called_once_with(cluster)
cluster_update.assert_called_once_with(
ctx(), cluster,
{'info': {"fake_cm_info": "fake"}})
@mock.patch(plugin_path + "deploy.start_cluster")
def test_start_cluster(self, start_cluster):
cluster = mock.Mock()
self.vh._set_cluster_info = mock.Mock()
self.vh.start_cluster(cluster)
start_cluster.assert_called_once_with(cluster)
self.vh._set_cluster_info.assert_called_once_with(cluster)
@mock.patch(plugin_path + "deploy.decommission_cluster")
def test_decommission_nodes(self, decommission_cluster):
cluster = mock.Mock()
instances = mock.Mock()
self.vh.decommission_nodes(cluster, instances)
decommission_cluster.assert_called_once_with(cluster,
instances)
@mock.patch(plugin_path + "deploy.scale_cluster")
def test_scale_cluster(self, scale_cluster):
cluster = mock.Mock()
instances = mock.Mock()
self.vh.scale_cluster(cluster, instances)
scale_cluster.assert_called_once_with(cluster, instances)
@mock.patch("sahara.conductor.API.cluster_update")
@mock.patch("sahara.context.ctx")
@mock.patch(cloudera_utils_path + "get_cloudera_manager_info",
return_value={})
@mock.patch(plugin_utils_path + "get_hue")
def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
ctx, cluster_update):
hue = mock.Mock()
hue.get_ip_or_dns_name.return_value = "1.2.3.4"
get_hue.return_value = hue
cluster = mock.Mock()
self.vh._set_cluster_info(cluster)
info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}
cluster_update.assert_called_once_with(ctx(), cluster, info)
@mock.patch("sahara.plugins.utils.get_instance")
@mock.patch("sahara.plugins.utils.get_config_value_or_default")
@mock.patch("sahara.service.edp.job_utils.get_plugin")
def test_get_edp_engine(self, get_plugin, get_config_value_or_default,
get_instance):
cluster = mock.Mock()
job_type = 'Java'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsInstance(ret, edp_engine.EdpOozieEngine)
job_type = 'Spark'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsInstance(ret, edp_engine.EdpSparkEngine)
job_type = 'unsupported'
ret = self.vh.get_edp_engine(cluster, job_type)
self.assertIsNone(ret)
def test_get_edp_job_types(self):
ret = self.vh.get_edp_job_types()
expect = edp_engine.EdpOozieEngine.get_supported_job_types() + \
edp_engine.EdpSparkEngine.get_supported_job_types()
self.assertEqual(expect, ret)
@mock.patch(plugin_path +
"edp_engine.EdpOozieEngine.get_possible_job_config",
return_value={'job_config': {}})
def test_edp_config_hints(self, get_possible_job_config):
job_type = mock.Mock()
ret = self.vh.get_edp_config_hints(job_type)
get_possible_job_config.assert_called_once_with(job_type)
self.assertEqual(ret, {'job_config': {}})
@mock.patch(plugin_path + "deploy.get_open_ports", return_value=[1234])
def test_get_open_ports(self, get_open_ports):
node_group = mock.Mock()
ret = self.vh.get_open_ports(node_group)
get_open_ports.assert_called_once_with(node_group)
self.assertEqual(ret, [1234])
@mock.patch(plugin_utils_path + "recommend_configs")
def test_recommend_configs(self, recommend_configs):
cluster = mock.Mock()
scaling = mock.Mock()
self.vh.get_plugin_configs = mock.Mock()
self.vh.recommend_configs(cluster, scaling)
recommend_configs.assert_called_once_with(cluster,
self.vh.get_plugin_configs(),
scaling)
| shakamunyi/sahara | sahara/tests/unit/plugins/cdh/v5_11_0/test_versionhandler.py | Python | apache-2.0 | 5,857 |
#!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for the DoubleClick Ad Exchange Buyer API Python Client Library.
"""
__author__ = '[email protected] (Stan Grinberg)'
import os
from distutils.core import setup
from adspygoogle.adxbuyer import LIB_AUTHOR
from adspygoogle.adxbuyer import LIB_AUTHOR_EMAIL
from adspygoogle.adxbuyer import LIB_NAME
from adspygoogle.adxbuyer import LIB_URL
from adspygoogle.adxbuyer import LIB_VERSION
PACKAGES = ['adspygoogle', 'adspygoogle.common', 'adspygoogle.common.https',
'adspygoogle.common.soappy', 'adspygoogle.adxbuyer',
'adspygoogle.SOAPpy', 'adspygoogle.SOAPpy.wstools']
PACKAGE_DATA = {'adspygoogle.adxbuyer': [os.path.join('data', '*')]}
setup(name='adspygoogle.adxbuyer',
version=LIB_VERSION,
description=LIB_NAME,
author=LIB_AUTHOR,
author_email=LIB_AUTHOR_EMAIL,
maintainer=LIB_AUTHOR,
maintainer_email=LIB_AUTHOR_EMAIL,
url=LIB_URL,
license='Apache License 2.0',
long_description='For additional information, please see %s' % LIB_URL,
packages=PACKAGES,
package_data=PACKAGE_DATA,
platforms='any')
| donspaulding/adspygoogle | scripts/adspygoogle/adxbuyer/setup.py | Python | apache-2.0 | 1,720 |