Dataset columns:
code: string, lengths 3 to 1.05M
repo_name: string, lengths 5 to 104
path: string, lengths 4 to 251
language: string, 1 class
license: string, 15 classes
size: int64, 3 to 1.05M
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from utils import *


class Developers(Handler):
    def get(self):
        self.render('templates/developers.html',
                    close_button=self.params.small,
                    domain=self.domain,
                    params=self.params)


if __name__ == '__main__':
    run([('/developers', Developers)], debug=False)
jgeewax/googlepersonfinder
app/developers.py
Python
apache-2.0
875
from bson import ObjectId import simplejson as json from eve.tests import TestBase from eve.tests.test_settings import MONGO_DBNAME from eve.tests.utils import DummyEvent from eve import STATUS_OK, LAST_UPDATED, ID_FIELD, ISSUES, STATUS, ETAG from eve.methods.patch import patch_internal # @unittest.skip("don't need no freakin' tests!") class TestPatch(TestBase): def test_patch_to_resource_endpoint(self): _, status = self.patch(self.known_resource_url, data={}) self.assert405(status) def test_readonly_resource(self): _, status = self.patch(self.readonly_id_url, data={}) self.assert405(status) def test_unknown_id(self): _, status = self.patch(self.unknown_item_id_url, data={"key1": 'value1'}) self.assert404(status) def test_unknown_id_different_resource(self): # patching a 'user' with a valid 'contact' id will 404 _, status = self.patch('%s/%s/' % (self.different_resource, self.item_id), data={"key1": "value1"}) self.assert404(status) # of course we can still patch a 'user' _, status = self.patch('%s/%s/' % (self.different_resource, self.user_id), data={'key1': '{"username": "username1"}'}, headers=[('If-Match', self.user_etag)]) self.assert200(status) def test_by_name(self): _, status = self.patch(self.item_name_url, data={'key1': 'value1'}) self.assert405(status) def test_ifmatch_missing(self): _, status = self.patch(self.item_id_url, data={'key1': 'value1'}) self.assert403(status) def test_ifmatch_disabled(self): self.app.config['IF_MATCH'] = False r, status = self.patch(self.item_id_url, data={'key1': 'value1'}) self.assert200(status) self.assertTrue(ETAG not in r) def test_ifmatch_bad_etag(self): _, status = self.patch(self.item_id_url, data={'key1': 'value1'}, headers=[('If-Match', 'not-quite-right')]) self.assert412(status) def test_unique_value(self): # TODO # for the time being we are happy with testing only Eve's custom # validation. We rely on Cerberus' own test suite for other validation # unit tests. This test also makes sure that response status is # syntatically correcy in case of validation issues. # We should probably test every single case as well (seems overkill). 
r, status = self.patch(self.item_id_url, data={"ref": "%s" % self.alt_ref}, headers=[('If-Match', self.item_etag)]) self.assertValidationErrorStatus(status) self.assertValidationError(r, {'ref': "value '%s' is not unique" % self.alt_ref}) def test_patch_string(self): field = "ref" test_value = "1234567890123456789012345" changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) self.assertEqual(db_value, test_value) def test_patch_integer(self): field = "prog" test_value = 9999 changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) self.assertEqual(db_value, test_value) def test_patch_list_as_array(self): field = "role" test_value = ["vendor", "client"] changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) self.assertTrue(set(test_value).issubset(db_value)) def test_patch_rows(self): field = "rows" test_value = [ {'sku': 'AT1234', 'price': 99}, {'sku': 'XF9876', 'price': 9999} ] changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) for test_item in test_value: self.assertTrue(test_item in db_value) def test_patch_list(self): field = "alist" test_value = ["a_string", 99] changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) self.assertEqual(db_value, test_value) def test_patch_dict(self): field = "location" test_value = {'address': 'an address', 'city': 'a city'} changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) self.assertEqual(db_value, test_value) def test_patch_nested(self): field = "location.city" test_value = 'a nested city' changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get("location", r)["city"] self.assertEqual(db_value, test_value) def test_patch_datetime(self): field = "born" test_value = "Tue, 06 Nov 2012 10:33:31 GMT" changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) self.assertEqual(db_value, test_value) def test_patch_objectid(self): field = "tid" test_value = "4f71c129c88e2018d4000000" changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) self.assertEqual(db_value, test_value) def test_patch_null_objectid(self): # verify that #341 is fixed. 
field = "tid" test_value = None changes = {field: test_value} r = self.perform_patch(changes) db_value = self.compare_patch_with_get(field, r) self.assertEqual(db_value, test_value) def test_patch_defaults(self): field = "ref" test_value = "1234567890123456789012345" changes = {field: test_value} r = self.perform_patch(changes) self.assertRaises(KeyError, self.compare_patch_with_get, 'title', r) def test_patch_defaults_with_post_override(self): field = "ref" test_value = "1234567890123456789012345" r = self.perform_patch_with_post_override(field, test_value) self.assert200(r.status_code) self.assertRaises(KeyError, self.compare_patch_with_get, 'title', json.loads(r.get_data())) def test_patch_multiple_fields(self): fields = ['ref', 'prog', 'role'] test_values = ["9876543210987654321054321", 123, ["agent"]] changes = {"ref": test_values[0], "prog": test_values[1], "role": test_values[2]} r = self.perform_patch(changes) db_values = self.compare_patch_with_get(fields, r) for i in range(len(db_values)): self.assertEqual(db_values[i], test_values[i]) def test_patch_with_post_override(self): # a POST request with PATCH override turns into a PATCH request r = self.perform_patch_with_post_override('prog', 1) self.assert200(r.status_code) def test_patch_internal(self): # test that patch_internal is available and working properly. test_field = 'ref' test_value = "9876543210987654321098765" data = {test_field: test_value} with self.app.test_request_context(self.item_id_url): r, _, _, status = patch_internal( self.known_resource, data, concurrency_check=False, **{'_id': self.item_id}) db_value = self.compare_patch_with_get(test_field, r) self.assertEqual(db_value, test_value) self.assert200(status) def perform_patch(self, changes): r, status = self.patch(self.item_id_url, data=changes, headers=[('If-Match', self.item_etag)]) self.assert200(status) self.assertPatchResponse(r, self.item_id) return r def perform_patch_with_post_override(self, field, value): headers = [('X-HTTP-Method-Override', 'PATCH'), ('If-Match', self.item_etag), ('Content-Type', 'application/json')] return self.test_client.post(self.item_id_url, data=json.dumps({field: value}), headers=headers) def compare_patch_with_get(self, fields, patch_response): raw_r = self.test_client.get(self.item_id_url) r, status = self.parse_response(raw_r) self.assert200(status) self.assertEqual(raw_r.headers.get('ETag'), patch_response[ETAG]) if isinstance(fields, str): return r[fields] else: return [r[field] for field in fields] def test_patch_allow_unknown(self): changes = {"unknown": "unknown"} r, status = self.patch(self.item_id_url, data=changes, headers=[('If-Match', self.item_etag)]) self.assertValidationErrorStatus(status) self.assertValidationError(r, {'unknown': 'unknown field'}) self.app.config['DOMAIN'][self.known_resource]['allow_unknown'] = True r, status = self.patch(self.item_id_url, data=changes, headers=[('If-Match', self.item_etag)]) self.assert200(status) self.assertPatchResponse(r, self.item_id) def test_patch_x_www_form_urlencoded(self): field = "ref" test_value = "1234567890123456789012345" changes = {field: test_value} headers = [('If-Match', self.item_etag)] r, status = self.parse_response(self.test_client.patch( self.item_id_url, data=changes, headers=headers)) self.assert200(status) self.assertTrue('OK' in r[STATUS]) def test_patch_referential_integrity(self): data = {"person": self.unknown_item_id} headers = [('If-Match', self.invoice_etag)] r, status = self.patch(self.invoice_id_url, data=data, headers=headers) 
self.assertValidationErrorStatus(status) expected = ("value '%s' must exist in resource '%s', field '%s'" % (self.unknown_item_id, 'contacts', self.app.config['ID_FIELD'])) self.assertValidationError(r, {'person': expected}) data = {"person": self.item_id} r, status = self.patch(self.invoice_id_url, data=data, headers=headers) self.assert200(status) self.assertPatchResponse(r, self.invoice_id) def test_patch_write_concern_success(self): # 0 and 1 are the only valid values for 'w' on our mongod instance (1 # is the default) self.domain['contacts']['mongo_write_concern'] = {'w': 0} field = "ref" test_value = "X234567890123456789012345" changes = {field: test_value} _, status = self.patch(self.item_id_url, data=changes, headers=[('If-Match', self.item_etag)]) self.assert200(status) def test_patch_write_concern_fail(self): # should get a 500 since there's no replicaset on the mongod instance self.domain['contacts']['mongo_write_concern'] = {'w': 2} field = "ref" test_value = "X234567890123456789012345" changes = {field: test_value} _, status = self.patch(self.item_id_url, data=changes, headers=[('If-Match', self.item_etag)]) self.assert500(status) def test_patch_missing_standard_date_fields(self): """Documents created outside the API context could be lacking the LAST_UPDATED and/or DATE_CREATED fields. """ # directly insert a document, without DATE_CREATED e LAST_UPDATED # values. contacts = self.random_contacts(1, False) ref = 'test_update_field' contacts[0]['ref'] = ref _db = self.connection[MONGO_DBNAME] _db.contacts.insert(contacts) # now retrieve same document via API and get its etag, which is # supposed to be computed on default DATE_CREATED and LAST_UPDATAED # values. response, status = self.get(self.known_resource, item=ref) etag = response[ETAG] _id = response['_id'] # attempt a PATCH with the new etag. 
field = "ref" test_value = "X234567890123456789012345" changes = {field: test_value} _, status = self.patch('%s/%s' % (self.known_resource_url, _id), data=changes, headers=[('If-Match', etag)]) self.assert200(status) def test_patch_subresource(self): _db = self.connection[MONGO_DBNAME] # create random contact fake_contact = self.random_contacts(1) fake_contact_id = _db.contacts.insert(fake_contact)[0] # update first invoice to reference the new contact _db.invoices.update({'_id': ObjectId(self.invoice_id)}, {'$set': {'person': fake_contact_id}}) # GET all invoices by new contact response, status = self.get('users/%s/invoices/%s' % (fake_contact_id, self.invoice_id)) etag = response[ETAG] data = {"inv_number": "new_number"} headers = [('If-Match', etag)] response, status = self.patch('users/%s/invoices/%s' % (fake_contact_id, self.invoice_id), data=data, headers=headers) self.assert200(status) self.assertPatchResponse(response, self.invoice_id) def test_patch_bandwidth_saver(self): changes = {'ref': '1234567890123456789012345'} # bandwidth_saver is on by default self.assertTrue(self.app.config['BANDWIDTH_SAVER']) r = self.perform_patch(changes) self.assertFalse('ref' in r) db_value = self.compare_patch_with_get(self.app.config['ETAG'], r) self.assertEqual(db_value, r[self.app.config['ETAG']]) self.item_etag = r[self.app.config['ETAG']] # test return all fields (bandwidth_saver off) self.app.config['BANDWIDTH_SAVER'] = False r = self.perform_patch(changes) self.assertTrue('ref' in r) db_value = self.compare_patch_with_get(self.app.config['ETAG'], r) self.assertEqual(db_value, r[self.app.config['ETAG']]) def test_patch_readonly_field_with_previous_document(self): schema = self.domain['contacts']['schema'] del(schema['ref']['required']) # disable read-only on the field so we can store a value which is # also different form its default value. schema['read_only_field']['readonly'] = False changes = {'read_only_field': 'value'} r = self.perform_patch(changes) # resume read-only status for the field self.domain['contacts']['schema']['read_only_field']['readonly'] = True # test that if the read-only field is included with the payload and its # value is equal to the one stored with the document, validation # succeeds (#479). etag = r['_etag'] r, status = self.patch(self.item_id_url, data=changes, headers=[('If-Match', etag)]) self.assert200(status) self.assertPatchResponse(r, self.item_id) # test that if the read-only field is included with the payload and its # value is different from the stored document, validation fails. 
etag = r['_etag'] changes = {'read_only_field': 'another value'} r, status = self.patch(self.item_id_url, data=changes, headers=[('If-Match', etag)]) self.assert422(status) self.assertTrue('is read-only' in r['_issues']['read_only_field']) def assertPatchResponse(self, response, item_id): self.assertTrue(STATUS in response) self.assertTrue(STATUS_OK in response[STATUS]) self.assertFalse(ISSUES in response) self.assertTrue(ID_FIELD in response) self.assertEqual(response[ID_FIELD], item_id) self.assertTrue(LAST_UPDATED in response) self.assertTrue(ETAG in response) self.assertTrue('_links' in response) self.assertItemLink(response['_links'], item_id) def patch(self, url, data, headers=[]): headers.append(('Content-Type', 'application/json')) r = self.test_client.patch(url, data=json.dumps(data), headers=headers) return self.parse_response(r) class TestEvents(TestBase): new_ref = "0123456789012345678901234" def test_on_pre_PATCH(self): devent = DummyEvent(self.before_update) self.app.on_pre_PATCH += devent self.patch() self.assertEqual(self.known_resource, devent.called[0]) self.assertEqual(3, len(devent.called)) def test_on_pre_PATCH_contacts(self): devent = DummyEvent(self.before_update) self.app.on_pre_PATCH_contacts += devent self.patch() self.assertEqual(2, len(devent.called)) def test_on_PATCH_dynamic_filter(self): def filter_this(resource, request, lookup): lookup["_id"] = self.unknown_item_id self.app.on_pre_PATCH += filter_this # Would normally patch the known document; will return 404 instead. r, s = self.parse_response(self.patch()) self.assert404(s) def test_on_post_PATCH(self): devent = DummyEvent(self.after_update) self.app.on_post_PATCH += devent self.patch() self.assertEqual(self.known_resource, devent.called[0]) self.assertEqual(200, devent.called[2].status_code) self.assertEqual(3, len(devent.called)) def test_on_post_PATCH_contacts(self): devent = DummyEvent(self.after_update) self.app.on_post_PATCH_contacts += devent self.patch() self.assertEqual(200, devent.called[1].status_code) self.assertEqual(2, len(devent.called)) def test_on_update(self): devent = DummyEvent(self.before_update) self.app.on_update += devent self.patch() self.assertEqual(self.known_resource, devent.called[0]) self.assertEqual(3, len(devent.called)) def test_on_update_contacts(self): devent = DummyEvent(self.before_update) self.app.on_update_contacts += devent self.patch() self.assertEqual(2, len(devent.called)) def test_on_updated(self): devent = DummyEvent(self.after_update) self.app.on_updated += devent self.patch() self.assertEqual(self.known_resource, devent.called[0]) self.assertEqual(3, len(devent.called)) def test_on_updated_contacts(self): devent = DummyEvent(self.after_update) self.app.on_updated_contacts += devent self.patch() self.assertEqual(2, len(devent.called)) def before_update(self): db = self.connection[MONGO_DBNAME] contact = db.contacts.find_one(ObjectId(self.item_id)) return contact['ref'] == self.item_name def after_update(self): return not self.before_update() def patch(self): headers = [('Content-Type', 'application/json'), ('If-Match', self.item_etag)] data = json.dumps({"ref": self.new_ref}) return self.test_client.patch( self.item_id_url, data=data, headers=headers)
opticode/eve
eve/tests/methods/patch.py
Python
bsd-3-clause
19,911
from __future__ import absolute_import import urwid from . import common def _mkhelp(): text = [] keys = [ ("A", "accept all intercepted flows"), ("a", "accept this intercepted flow"), ("C", "clear flow list or eventlog"), ("d", "delete flow"), ("D", "duplicate flow"), ("e", "toggle eventlog"), ("F", "toggle follow flow list"), ("l", "set limit filter pattern"), ("L", "load saved flows"), ("r", "replay request"), ("V", "revert changes to request"), ("w", "save flows "), ("W", "stream flows to file"), ("X", "kill and delete flow, even if it's mid-intercept"), ("tab", "tab between eventlog and flow list"), ("enter", "view flow"), ("|", "run script on this flow"), ] text.extend(common.format_keyvals(keys, key="key", val="text", indent=4)) return text help_context = _mkhelp() footer = [ ('heading_key', "?"), ":help ", ] class EventListBox(urwid.ListBox): def __init__(self, master): self.master = master urwid.ListBox.__init__(self, master.eventlist) def keypress(self, size, key): key = common.shortcuts(key) if key == "C": self.master.clear_events() key = None return urwid.ListBox.keypress(self, size, key) class BodyPile(urwid.Pile): def __init__(self, master): h = urwid.Text("Event log") h = urwid.Padding(h, align="left", width=("relative", 100)) self.inactive_header = urwid.AttrWrap(h, "heading_inactive") self.active_header = urwid.AttrWrap(h, "heading") urwid.Pile.__init__( self, [ FlowListBox(master), urwid.Frame(EventListBox(master), header = self.inactive_header) ] ) self.master = master def keypress(self, size, key): if key == "tab": self.focus_position = (self.focus_position + 1)%len(self.widget_list) if self.focus_position == 1: self.widget_list[1].header = self.active_header else: self.widget_list[1].header = self.inactive_header key = None elif key == "e": self.master.toggle_eventlog() key = None # This is essentially a copypasta from urwid.Pile's keypress handler. # So much for "closed for modification, but open for extension". 
item_rows = None if len(size)==2: item_rows = self.get_item_rows( size, focus=True ) i = self.widget_list.index(self.focus_item) tsize = self.get_item_size(size,i,True,item_rows) return self.focus_item.keypress( tsize, key ) class ConnectionItem(common.WWrap): def __init__(self, master, state, flow, focus): self.master, self.state, self.flow = master, state, flow self.f = focus w = self.get_text() common.WWrap.__init__(self, w) def get_text(self): return common.format_flow(self.flow, self.f, hostheader=self.master.showhost) def selectable(self): return True def save_flows_prompt(self, k): if k == "a": self.master.path_prompt( "Save all flows to: ", self.state.last_saveload, self.master.save_flows ) else: self.master.path_prompt( "Save this flow to: ", self.state.last_saveload, self.master.save_one_flow, self.flow ) def stop_server_playback_prompt(self, a): if a != "n": self.master.stop_server_playback() def server_replay_prompt(self, k): if k == "a": self.master.start_server_playback( [i.copy() for i in self.master.state.view], self.master.killextra, self.master.rheaders, False, self.master.nopop, self.master.options.replay_ignore_params, self.master.options.replay_ignore_content ) elif k == "t": self.master.start_server_playback( [self.flow.copy()], self.master.killextra, self.master.rheaders, False, self.master.nopop, self.master.options.replay_ignore_params, self.master.options.replay_ignore_content ) else: self.master.path_prompt( "Server replay path: ", self.state.last_saveload, self.master.server_playback_path ) def keypress(self, (maxcol,), key): key = common.shortcuts(key) if key == "a": self.flow.accept_intercept(self.master) self.master.sync_list_view() elif key == "d": self.flow.kill(self.master) self.state.delete_flow(self.flow) self.master.sync_list_view() elif key == "D": f = self.master.duplicate_flow(self.flow) self.master.view_flow(f) elif key == "r": r = self.master.replay_request(self.flow) if r: self.master.statusbar.message(r) self.master.sync_list_view() elif key == "S": if not self.master.server_playback: self.master.prompt_onekey( "Server Replay", ( ("all flows", "a"), ("this flow", "t"), ("file", "f"), ), self.server_replay_prompt, ) else: self.master.prompt_onekey( "Stop current server replay?", ( ("yes", "y"), ("no", "n"), ), self.stop_server_playback_prompt, ) elif key == "V": if not self.flow.modified(): self.master.statusbar.message("Flow not modified.") return self.state.revert(self.flow) self.master.sync_list_view() self.master.statusbar.message("Reverted.") elif key == "w": self.master.prompt_onekey( "Save", ( ("all flows", "a"), ("this flow", "t"), ), self.save_flows_prompt, ) elif key == "X": self.flow.kill(self.master) elif key == "enter": if self.flow.request: self.master.view_flow(self.flow) elif key == "|": self.master.path_prompt( "Send flow to script: ", self.state.last_script, self.master.run_script_once, self.flow ) else: return key class FlowListWalker(urwid.ListWalker): def __init__(self, master, state): self.master, self.state = master, state if self.state.flow_count(): self.set_focus(0) def get_focus(self): f, i = self.state.get_focus() f = ConnectionItem(self.master, self.state, f, True) if f else None return f, i def set_focus(self, focus): ret = self.state.set_focus(focus) return ret def get_next(self, pos): f, i = self.state.get_next(pos) f = ConnectionItem(self.master, self.state, f, False) if f else None return f, i def get_prev(self, pos): f, i = self.state.get_prev(pos) f = ConnectionItem(self.master, self.state, f, False) if f else 
None return f, i class FlowListBox(urwid.ListBox): def __init__(self, master): self.master = master urwid.ListBox.__init__(self, master.flow_list_walker) def keypress(self, size, key): key = common.shortcuts(key) if key == "A": self.master.accept_all() self.master.sync_list_view() elif key == "C": self.master.clear_flows() elif key == "e": self.master.toggle_eventlog() elif key == "l": self.master.prompt("Limit: ", self.master.state.limit_txt, self.master.set_limit) elif key == "L": self.master.path_prompt( "Load flows: ", self.master.state.last_saveload, self.master.load_flows_callback ) elif key == "F": self.master.toggle_follow_flows() elif key == "W": if self.master.stream: self.master.stop_stream() else: self.master.path_prompt( "Stream flows to: ", self.master.state.last_saveload, self.master.start_stream_to_path ) else: return urwid.ListBox.keypress(self, size, key)
xtso520ok/mitmproxy
libmproxy/console/flowlist.py
Python
mit
8,997
# 49
m = 0
b = 1
f = 1
while f is not None:
    a = f
    f = None
    while True:
        x = str(a ** b)
        if len(x) > b:
            break
        if len(x) == b:
            if f is None:
                f = a
            m += 1
        a += 1
    b += 1
print m
higgsd/euler
py/63.py
Python
bsd-2-clause
273
import argparse
import os

from db import db
from management import submissions
from models import Submission
from tests import utils


def test_delete_disconnected_db_submissions(journalist_app, app_storage, config):
    """
    Test that Submission records without corresponding files are deleted.
    """
    with journalist_app.app_context():
        source, _ = utils.db_helper.init_source(app_storage)
        source_id = source.id

        # make two submissions
        utils.db_helper.submit(app_storage, source, 2)
        submission_id = source.submissions[0].id

        # remove one submission's file
        f1 = os.path.join(config.STORE_DIR, source.filesystem_id,
                          source.submissions[0].filename)
        assert os.path.exists(f1)
        os.remove(f1)
        assert os.path.exists(f1) is False

        # check that the single disconnect is seen
        disconnects = submissions.find_disconnected_db_submissions(config.STORE_DIR)
        assert len(disconnects) == 1
        assert disconnects[0].filename == source.submissions[0].filename

        # remove the disconnected Submission
        args = argparse.Namespace(force=True, store_dir=config.STORE_DIR)
        submissions.delete_disconnected_db_submissions(args)

        assert db.session.query(Submission).filter(Submission.id == submission_id).count() == 0
        assert db.session.query(Submission).filter(Submission.source_id == source_id).count() == 1


def test_delete_disconnected_fs_submissions(journalist_app, app_storage, config):
    """
    Test that files in the store without corresponding Submission records
    are deleted.
    """
    source, _ = utils.db_helper.init_source(app_storage)

    # make two submissions
    utils.db_helper.submit(app_storage, source, 2)
    source_filesystem_id = source.filesystem_id
    submission_filename = source.submissions[0].filename
    disconnect_path = os.path.join(config.STORE_DIR, source_filesystem_id,
                                   submission_filename)

    # make two replies, to make sure that their files are not seen
    # as disconnects
    journalist, _ = utils.db_helper.init_journalist("Mary", "Lane")
    utils.db_helper.reply(app_storage, journalist, source, 2)

    # delete the first Submission record
    db.session.delete(source.submissions[0])
    db.session.commit()

    disconnects = submissions.find_disconnected_fs_submissions(config.STORE_DIR)
    assert len(disconnects) == 1
    assert disconnects[0] == disconnect_path
    assert os.path.exists(disconnect_path)

    args = argparse.Namespace(force=True, store_dir=config.STORE_DIR)
    submissions.delete_disconnected_fs_submissions(args)

    assert os.path.exists(disconnect_path) is False
freedomofpress/securedrop
securedrop/tests/test_submission_cleanup.py
Python
agpl-3.0
2,674
# -*- coding: utf-8 -*-

# Copyright (c) 2002 - 2015 Detlev Offenbach <[email protected]>
#

"""
Package implementing an interface to the pyunit unittest package.

The package consist of a single dialog, which may be called as a standalone
version using the eric6_unittest script or from within the eric6 IDE. If it
is called from within eric6, it has the additional function to open a source
file that failed a test.
"""
testmana2/test
PyUnit/__init__.py
Python
gpl-3.0
429
__author__ = 'panzer'

def do_twice(f, arg):
    f(arg)
    f(arg)

def print_twice(s):
    print s
    print s

def do_four(f, arg):
    do_twice(f, arg)
    do_twice(f, arg)

do_twice(print_twice, 'spam')
do_four(print_twice, 'spam')
BigFatNoob-NCSU/x9115george2
hw/code/2/3_4.py
Python
mit
240
"""Generate hammer command tree in json format by inspecting every command's
help.

"""
import json

from robottelo import ssh
from robottelo.cli import hammer


def generate_command_tree(command):
    """Recursively walk trhough the hammer commands and subcommands and fetch
    their help. Return a dictionary with the contents.

    """
    output = ssh.command('{0} --help'.format(command)).stdout
    contents = hammer.parse_help(output)
    if len(contents['subcommands']) > 0:
        for subcommand in contents['subcommands']:
            subcommand.update(generate_command_tree(
                '{0} {1}'.format(command, subcommand['name'])
            ))
    return contents


# Generate the json file in the working directory
with open('hammer_commands.json', 'w') as f:
    f.write(json.dumps(
        generate_command_tree('hammer'),
        indent=2,
        sort_keys=True
    ))
tkolhar/robottelo
scripts/hammer_command_tree.py
Python
gpl-3.0
894
from a10sdk.common.A10BaseClass import A10BaseClass


class RulesList(A10BaseClass):

    """This class does not support CRUD Operations please use parent.

    :param std_list_action: {"enum": ["deny", "permit"], "type": "string", "description": "'deny': Specify community to reject; 'permit': Specify community to accept; ", "format": "enum"}
    :param std_list_comm_value: {"type": "string", "description": "community value in the format 1-4294967295|AA:NN|internet|local-AS|no-advertise|no-export", "format": "string-rlx"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "rules-list"
        self.DeviceProxy = ""
        self.std_list_action = ""
        self.std_list_comm_value = ""

        for keys, value in kwargs.items():
            setattr(self,keys, value)


class StandardNum(A10BaseClass):

    """Class Description::
    Configure Standard number Community-list.

    Class standard-num supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    :param rules_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "std-list-action": {"enum": ["deny", "permit"], "type": "string", "description": "'deny': Specify community to reject; 'permit': Specify community to accept; ", "format": "enum"}, "std-list-comm-value": {"type": "string", "description": "community value in the format 1-4294967295|AA:NN|internet|local-AS|no-advertise|no-export", "format": "string-rlx"}}}]}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param std_list_num: {"description": "Community list number (standard)", "format": "number", "type": "number", "maximum": 99, "minimum": 1, "optional": false}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/ip/community-list/standard-num/{std_list_num}`.

    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = [ "std_list_num"]
        self.b_key = "standard-num"
        self.a10_url="/axapi/v3/ip/community-list/standard-num/{std_list_num}"
        self.DeviceProxy = ""
        self.rules_list = []
        self.uuid = ""
        self.std_list_num = ""

        for keys, value in kwargs.items():
            setattr(self,keys, value)
amwelch/a10sdk-python
a10sdk/core/ip/ip_community_list_standard_num.py
Python
apache-2.0
2,739
import serial
import time
import logger

strd_0 = " SIN SERVICIO "
strd_1 = " "

def main():
    try:
        for intento in range(0,2):
            serial_MDB = serial.Serial('/dev/ttyUSBMDB', 9600, timeout = 0.1)
            serial_MDB.write('\x0C\x00\x00\x00\x00') #Inhabilitar Monedero MDB
            rx = str(serial_MDB.readline())
            serial_MDB.close()
            if rx == "00":
                break
    except:
        logger.error("No esta disponible el puerto ttyUSBMDB")
    time.sleep(1)
    try:
        serial_Display = serial.Serial('/dev/ttyUSBDisplay', 9600, timeout = 0.1)
        serial_Display.write('CS\x00')
        serial_Display.write('TP\x01\x00')
        serial_Display.write('TT SIN SERVICIO ')
        serial_Display.write('\x00\x00')
        serial_Display.write('TT ')
        #serial_MDB.write('\x0C\xFF\xFF\xFF\xFF') # Habilitar Monedero MDB
        #time.sleep(0.50)
        serial_Display.close()
    except:
        logger.error("No esta disponible el puerto ttyUSBDisplay")

#main()
the-adrian/KernotekV2.0
InhibirMDB.py
Python
gpl-3.0
1,039
"""
Anscombe's quartet
==================

_thumb: .4, .4
"""
import seaborn as sns
sns.set_theme(style="ticks")

# Load the example dataset for Anscombe's quartet
df = sns.load_dataset("anscombe")

# Show the results of a linear regression within each dataset
sns.lmplot(x="x", y="y", col="dataset", hue="dataset", data=df,
           col_wrap=2, ci=None, palette="muted", height=4,
           scatter_kws={"s": 50, "alpha": 1})
mwaskom/seaborn
examples/anscombes_quartet.py
Python
bsd-3-clause
430
class Name:                                     # Use (object) in 2.X
    "name descriptor docs"
    def __get__(self, instance, owner):
        print('fetch...')
        return instance._name
    def __set__(self, instance, value):
        print('change...')
        instance._name = value
    def __delete__(self, instance):
        print('remove...')
        del instance._name

class Person:                                   # Use (object) in 2.X
    def __init__(self, name):
        self._name = name
    name = Name()                               # Assign descriptor to attr

bob = Person('Bob Smith')                       # bob has a managed attribute
print(bob.name)                                 # Runs Name.__get__
bob.name = 'Robert Smith'                       # Runs Name.__set__
print(bob.name)
del bob.name                                    # Runs Name.__delete__

print('-'*20)
sue = Person('Sue Jones')                       # sue inherits descriptor too
print(sue.name)
print(Name.__doc__)                             # Or help(Name)
simontakite/sysadmin
pythonscripts/learningPython/desc-person.py
Python
gpl-2.0
1,014
from __future__ import absolute_import import errno import os import time from rpython.rlib.objectmodel import compute_identity_hash from rpython.rlib.rfloat import round_double from rpython.rlib.streamio import open_file_as_stream from topaz.coerce import Coerce from topaz.error import RubyError, error_for_oserror, error_for_errno from topaz.module import ModuleDef, check_frozen from topaz.modules.process import Process from topaz.objects.bindingobject import W_BindingObject from topaz.objects.exceptionobject import W_ExceptionObject from topaz.objects.functionobject import W_FunctionObject from topaz.objects.moduleobject import W_ModuleObject from topaz.objects.procobject import W_ProcObject from topaz.objects.randomobject import W_RandomObject from topaz.objects.stringobject import W_StringObject from topaz.scope import StaticScope class Kernel(object): moduledef = ModuleDef("Kernel") @moduledef.method("class") def function_class(self, space): return space.getnonsingletonclass(self) @moduledef.method("singleton_methods", all="bool") def method_singleton_methods(self, space, all=True): methods = [] w_cls = space.getclass(self) if w_cls.is_singleton: methods.extend(w_cls.methods_w.keys()) w_cls = w_cls.superclass if all: while w_cls and w_cls.is_singleton: methods.extend(w_cls.methods_w.keys()) w_cls = w_cls.superclass return space.newarray([space.newsymbol(m) for m in methods]) @moduledef.method("methods", inherit="bool") def method_methods(self, space, inherit=True): w_cls = space.getclass(self) return space.newarray([ space.newsymbol(m) for m in w_cls.methods(space, inherit=inherit) ]) @moduledef.method("private_methods", inherit="bool") def method_private_methods(self, space, inherit=True): w_cls = space.getclass(self) return space.newarray([ space.newsymbol(m) for m in w_cls.methods(space, visibility=W_FunctionObject.PRIVATE, inherit=inherit) ]) @moduledef.method("protected_methods", inherit="bool") def method_protected_methods(self, space, inherit=True): w_cls = space.getclass(self) return space.newarray([ space.newsymbol(m) for m in w_cls.methods(space, visibility=W_FunctionObject.PROTECTED, inherit=inherit) ]) @moduledef.method("public_methods", inherit="bool") def method_public_methods(self, space, inherit=True): w_cls = space.getclass(self) return space.newarray([ space.newsymbol(m) for m in w_cls.methods(space, visibility=W_FunctionObject.PUBLIC, inherit=inherit) ]) @moduledef.method("lambda") def function_lambda(self, space, block): if block is None: block = space.getexecutioncontext().gettoprubyframe().block if block is None: raise space.error(space.w_ArgumentError, "tried to create lambda object without a block" ) else: return block.copy(space, is_lambda=True) @moduledef.method("proc") def function_proc(self, space, block): if block is None: block = space.getexecutioncontext().gettoprubyframe().block if block is None: raise space.error(space.w_ArgumentError, "tried to create Proc object without a block" ) return block.copy(space) @staticmethod def find_feature(space, path): assert path is not None if os.path.isfile(path): return path if not path.endswith(".rb"): path += ".rb" if not (path.startswith("/") or path.startswith("./") or path.startswith("../")): w_load_path = space.globals.get(space, "$LOAD_PATH") for w_base in space.listview(w_load_path): base = Coerce.path(space, w_base) full = os.path.join(base, path) if os.path.isfile(full): path = os.path.join(base, path) break return path @staticmethod def load_feature(space, path, orig_path, wrap=False): if not 
os.path.exists(path): raise space.error(space.w_LoadError, orig_path) try: f = open_file_as_stream(path, buffering=0) try: contents = f.readall() finally: f.close() except OSError as e: raise error_for_oserror(space, e) if wrap: lexical_scope = StaticScope(space.newmodule("Anonymous"), None) else: lexical_scope = None space.execute(contents, filepath=path, lexical_scope=lexical_scope) @moduledef.function("require", path="path") def function_require(self, space, path): assert path is not None orig_path = path path = Kernel.find_feature(space, path) w_loaded_features = space.globals.get(space, '$"') w_already_loaded = space.send( w_loaded_features, "include?", [space.newstr_fromstr(path)] ) if space.is_true(w_already_loaded): return space.w_false Kernel.load_feature(space, path, orig_path) w_loaded_features.method_lshift(space, space.newstr_fromstr(path)) return space.w_true @moduledef.function("load", path="path", wrap="bool") def function_load(self, space, path, wrap=False): assert path is not None orig_path = path path = Kernel.find_feature(space, path) Kernel.load_feature(space, path, orig_path, wrap=wrap) return space.w_true @moduledef.method("fail") @moduledef.method("raise") def method_raise(self, space, w_str_or_exception=None, w_string=None, w_array=None): w_exception = None if w_str_or_exception is None: w_exception = space.globals.get(space, "$!") or space.w_nil if w_exception is space.w_nil: w_exception = space.w_RuntimeError elif isinstance(w_str_or_exception, W_StringObject): w_exception = space.w_RuntimeError w_string = w_str_or_exception else: w_exception = w_str_or_exception if not space.respond_to(w_exception, "exception"): raise space.error(space.w_TypeError, "exception class/object expected" ) if w_string is not None: w_exc = space.send(w_exception, "exception", [w_string]) else: w_exc = space.send(w_exception, "exception") if w_array is not None: raise space.error( space.w_NotImplementedError, "custom backtrace for Kernel#raise" ) if not isinstance(w_exc, W_ExceptionObject): raise space.error(space.w_TypeError, "exception object expected" ) raise RubyError(w_exc) @moduledef.function("exit") def method_exit(self, space, args_w): return space.send( space.getmoduleobject(Process.moduledef), "exit", args_w ) @moduledef.function("exit!") def method_exit_bang(self, space, args_w): return space.send( space.getmoduleobject(Process.moduledef), "exit!", args_w ) @moduledef.function("abort", msg="str") def method_abort(self, space, msg=None): if msg: os.write(2, msg) return space.send(self, "exit", [space.newint(1)]) @moduledef.function("block_given?") @moduledef.function("iterator?") def method_block_givenp(self, space): return space.newbool( space.getexecutioncontext().gettoprubyframe().block is not None ) @moduledef.function("binding") def method_binding(self, space): return space.newbinding_fromframe(space.getexecutioncontext().gettoprubyframe()) @moduledef.function("__method__") @moduledef.function("__callee__") def method_callee(self, space): frame = space.getexecutioncontext().gettoprubyframe() return space.newsymbol(frame.bytecode.name) @moduledef.function("exec") def method_exec(self, space, args_w): if len(args_w) > 1 and space.respond_to(args_w[0], "to_hash"): raise space.error(space.w_NotImplementedError, "exec with environment") if len(args_w) > 1 and space.respond_to(args_w[-1], "to_hash"): raise space.error(space.w_NotImplementedError, "exec with options") if space.respond_to(args_w[0], "to_ary"): w_cmd = space.convert_type(args_w[0], space.w_array, "to_ary") cmd_w = 
space.listview(w_cmd) if len(cmd_w) != 2: raise space.error(space.w_ArgumentError, "wrong first argument") cmd, argv0 = [ space.str0_w(space.convert_type( w_e, space.w_string, "to_str" )) for w_e in cmd_w ] else: w_cmd = space.convert_type(args_w[0], space.w_string, "to_str") cmd = space.str0_w(w_cmd) argv0 = None if len(args_w) > 1 or argv0 is not None: if argv0 is None: sepidx = cmd.rfind(os.sep) + 1 if sepidx > 0: argv0 = cmd[sepidx:] else: argv0 = cmd args = [argv0] args += [ space.str0_w(space.convert_type( w_arg, space.w_string, "to_str" )) for w_arg in args_w[1:] ] try: os.execv(cmd, args) except OSError as e: raise error_for_oserror(space, e) else: if not cmd: raise error_for_errno(space, errno.ENOENT) shell = os.environ.get("RUBYSHELL") or os.environ.get("COMSPEC") or "/bin/sh" sepidx = shell.rfind(os.sep) + 1 if sepidx > 0: argv0 = shell[sepidx:] else: argv0 = shell try: os.execv(shell, [argv0, "-c", cmd]) except OSError as e: raise error_for_oserror(space, e) @moduledef.function("system") def method_system(self, space, args_w): raise space.error(space.w_NotImplementedError, "Kernel#system()") @moduledef.function("fork") def method_fork(self, space, block): return space.send( space.getmoduleobject(Process.moduledef), "fork", block=block ) @moduledef.function("at_exit") def method_at_exit(self, space, block): space.register_exit_handler(block) return block @moduledef.function("=~") def method_match(self, space, w_other): return space.w_nil @moduledef.function("!~") def method_not_match(self, space, w_other): return space.newbool(not space.is_true(space.send(self, "=~", [w_other]))) @moduledef.function("eql?") def method_eqlp(self, space, w_other): return space.newbool(self is w_other) @moduledef.function("instance_variable_defined?", name="symbol") def method_instance_variable_definedp(self, space, name): return space.newbool(self.find_instance_var(space, name) is not None) @moduledef.method("respond_to?", include_private="bool") def method_respond_top(self, space, w_name, include_private=False): if space.respond_to(self, space.symbol_w(w_name)): return space.newbool(True) w_found = space.send( self, "respond_to_missing?", [w_name, space.newbool(include_private)] ) return space.newbool(space.is_true(w_found)) @moduledef.method("respond_to_missing?") def method_respond_to_missingp(self, space, w_name, w_include_private): return space.newbool(False) @moduledef.method("dup") def method_dup(self, space): if (self is space.w_nil or self is space.w_true or self is space.w_false or space.is_kind_of(self, space.w_symbol) or space.is_kind_of(self, space.w_fixnum)): raise space.error(space.w_TypeError, "can't dup %s" % space.getclass(self).name) w_dup = space.send(space.getnonsingletonclass(self), "allocate") w_dup.copy_instance_vars(space, self) space.infect(w_dup, self, freeze=False) space.send(w_dup, "initialize_dup", [self]) return w_dup @moduledef.method("clone") def method_clone(self, space): if (self is space.w_nil or self is space.w_true or self is space.w_false or space.is_kind_of(self, space.w_symbol) or space.is_kind_of(self, space.w_fixnum)): raise space.error(space.w_TypeError, "can't dup %s" % space.getclass(self).name) w_dup = space.send(space.getnonsingletonclass(self), "allocate") w_dup.copy_instance_vars(space, self) w_dup.copy_singletonclass(space, space.getsingletonclass(self)) space.send(w_dup, "initialize_clone", [self]) space.infect(w_dup, self, freeze=True) return w_dup @moduledef.method("sleep") def method_sleep(self, space, w_duration=None): if w_duration is None: 
raise space.error(space.w_NotImplementedError) elif space.is_kind_of(w_duration, space.w_string): raise space.error(space.w_TypeError, "can't convert String into time interval") start = time.time() time.sleep(Coerce.float(space, w_duration)) return space.newint(int(round_double(time.time() - start, 0))) @moduledef.method("initialize_clone") @moduledef.method("initialize_dup") def method_initialize_dup(self, space, w_other): space.send(self, "initialize_copy", [w_other]) return self @moduledef.method("initialize_copy") def method_initialize_copy(self, space, w_other): return self @moduledef.function("Float") def method_Float(self, space, w_arg): if w_arg is space.w_nil: raise space.error(space.w_TypeError, "can't convert nil into Float") elif space.is_kind_of(w_arg, space.w_float): return space.newfloat(space.float_w(w_arg)) elif space.is_kind_of(w_arg, space.w_string): string = space.str_w(w_arg).strip(" ") try: return space.newfloat(float(string)) except ValueError: raise space.error(space.w_ArgumentError, "invalid value for Float(): %s" % string) else: return space.convert_type(w_arg, space.w_float, "to_f") @moduledef.method("kind_of?") @moduledef.method("is_a?") def method_is_kind_ofp(self, space, w_mod): if not isinstance(w_mod, W_ModuleObject): raise space.error(space.w_TypeError, "class or module required") return space.newbool(self.is_kind_of(space, w_mod)) @moduledef.method("instance_of?") def method_instance_of(self, space, w_mod): if not isinstance(w_mod, W_ModuleObject): raise space.error(space.w_TypeError, "class or module required") return space.newbool(space.getnonsingletonclass(self) is w_mod) @moduledef.method("eval") def method_eval(self, space, w_source, w_binding=None): if w_binding is None: frame = space.getexecutioncontext().gettoprubyframe() w_binding = space.newbinding_fromframe(frame) elif not isinstance(w_binding, W_BindingObject): raise space.error(space.w_TypeError, "wrong argument type %s (expected Binding)" % space.getclass(w_binding).name ) return space.send(w_binding, "eval", [w_source]) @moduledef.method("set_trace_func") def method_set_trace_func(self, space, w_proc): if w_proc is space.w_nil: w_proc = None else: assert isinstance(w_proc, W_ProcObject) space.getexecutioncontext().settraceproc(w_proc) def new_flag(moduledef, setter, getter, remover): @moduledef.method(setter) def setter_method(self, space): self.set_flag(space, getter) return self @moduledef.method(getter) def getter_method(self, space): return self.get_flag(space, getter) if remover is None: return (setter_method, getter_method) else: @moduledef.method(remover) def remover_method(self, space): self.unset_flag(space, getter) return self return (setter_method, getter_method, remover_method) method_untrust, method_untrusted, method_trust = new_flag(moduledef, "untrust", "untrusted?", "trust") method_taint, method_tainted, method_untaint = new_flag(moduledef, "taint", "tainted?", "untaint") method_freeze, method_frozen = new_flag(moduledef, "freeze", "frozen?", None) @moduledef.method("throw", name="symbol") def method_throw(self, space, name, w_value=None): from topaz.interpreter import Throw if not space.getexecutioncontext().is_in_catch_block_for_name(name): raise space.error(space.w_ArgumentError, "uncaught throw :%s" % name) if w_value is None: w_value = space.w_nil raise Throw(name, w_value) @moduledef.method("catch", name="symbol") def method_catch(self, space, name, block): if block is None: raise space.error(space.w_LocalJumpError, "no block given") from topaz.interpreter import Throw 
with space.getexecutioncontext().catch_block(name): try: return space.invoke_block(block, []) except Throw as e: if e.name == name: return e.w_value raise @moduledef.method("srand") def method_srand(self, space, w_seed=None): random_class = space.getclassfor(W_RandomObject) default = space.find_const(random_class, "DEFAULT") return default.srand(space, w_seed) @moduledef.method("autoload") def method_autoload(self, space, args_w): return space.send(space.getclass(self), "autoload", args_w) @moduledef.method("autoload?") def method_autoload(self, space, args_w): return space.send(space.getclass(self), "autoload?", args_w) @moduledef.method("object_id") def method_object_id(self, space): return space.send(self, "__id__") @moduledef.method("singleton_class") def method_singleton_class(self, space): return space.getsingletonclass(self) @moduledef.method("extend") @check_frozen() def method_extend(self, space, w_mod): if not space.is_kind_of(w_mod, space.w_module) or space.is_kind_of(w_mod, space.w_class): if space.is_kind_of(w_mod, space.w_class): name = "Class" else: name = space.obj_to_s(space.getclass(w_mod)) raise space.error( space.w_TypeError, "wrong argument type %s (expected Module)" % name ) space.send(w_mod, "extend_object", [self]) space.send(w_mod, "extended", [self]) @moduledef.method("inspect") def method_inspect(self, space): return space.send(self, "to_s") @moduledef.method("to_s") def method_to_s(self, space): return space.newstr_fromstr(space.any_to_s(self)) @moduledef.method("===") def method_eqeqeq(self, space, w_other): if self is w_other: return space.w_true return space.send(self, "==", [w_other]) @moduledef.method("send") def method_send(self, space, args_w, block): return space.send(self, "__send__", args_w, block) @moduledef.method("nil?") def method_nilp(self, space): return space.w_false @moduledef.method("hash") def method_hash(self, space): return space.newint(compute_identity_hash(self)) @moduledef.method("instance_variable_get", name="str") def method_instance_variable_get(self, space, name): return space.find_instance_var(self, name) @moduledef.method("instance_variable_set", name="str") @check_frozen() def method_instance_variable_set(self, space, name, w_value): space.set_instance_var(self, name, w_value) return w_value @moduledef.method("method") def method_method(self, space, w_sym): return space.send( space.send(space.getclass(self), "instance_method", [w_sym]), "bind", [self] ) @moduledef.method("tap") def method_tap(self, space, block): if block is not None: space.invoke_block(block, [self]) else: raise space.error(space.w_LocalJumpError, "no block given") return self @moduledef.method("define_singleton_method", name="symbol") @check_frozen() def method_define_singleton_method(self, space, name, w_method=None, block=None): args_w = [space.newsymbol(name)] if w_method is not None: args_w.append(w_method) return space.send(space.getsingletonclass(self), "define_method", args_w, block)
topazproject/topaz
topaz/modules/kernel.py
Python
bsd-3-clause
21,357
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-27 02:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='MenuItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='', max_length=80)),
                ('link', models.CharField(default='', max_length=80)),
            ],
        ),
    ]
azuer88/kerbus-alpha
src/kerbus/asimplemenu/migrations/0001_initial.py
Python
gpl-2.0
636
"""Plot to test HTML tooltip plugin

As a data explorer, I want to add rich information to each point in a scatter
plot, as details-on-demand"""
import matplotlib
matplotlib.use('Agg')

import matplotlib.pyplot as plt
import numpy as np, pandas as pd
from mpld3_rewrite import plugins

css = """
table
{
  border-collapse: collapse;
}
th
{
  color: #ffffff;
  background-color: #000000;
}
td
{
  background-color: #cccccc;
}
table, th, td
{
  font-family:Arial, Helvetica, sans-serif;
  border: 1px solid black;
  text-align: right;
}
"""

def main():
    fig, ax = plt.subplots()

    N = 50
    df = pd.DataFrame(index=range(N))
    df['x'] = np.random.randn(N)
    df['y'] = np.random.randn(N)
    df['z'] = np.random.randn(N)

    labels = []
    for i in range(N):
        label = df.ix[[i], :].T
        label.columns = ['Row {0}'.format(i)]
        labels.append(str(label.to_html()))
        # .to_html() is unicode, so make leading 'u' go away with str()

    points = ax.plot(df.x, df.y, 'o', color='k',
                     mec='w', ms=15, mew=1, alpha=.9)

    ax.set_xlabel('x')
    ax.set_ylabel('y')

    tooltip = plugins.PointHTMLTooltip(
        points[0], labels, voffset=10, hoffset=10, css=css)
    plugins.connect(fig, tooltip)

    return fig

if __name__ == '__main__':
    fig = main()
    plt.show()
mpld3/mpld3_rewrite
test_plots/test_plot_w_html_tooltips.py
Python
bsd-3-clause
1,286
"""
Forms for use with User objects
"""
from django import forms
from django.contrib.auth.models import User


class UserForm(forms.ModelForm):
    """
    Form for django.contrib.auth.models.User
    """

    class Meta:
        """
        Meta data for User Form
        """
        model = User
        fields = ('username', 'email', 'password')

    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        self.fields['username'].required = True
        self.fields['email'].required = True
        self.fields['password'].required = True

    def save(self, commit=True):
        """
        Override save so creates a user using create_user method on User model

        :param commit: Commit to DB or not
        :return: Instance of UserForm
        """
        instance = super(UserForm, self).save(commit=False)
        User.objects.create_user(
            username=self.cleaned_data.get('username'),
            password=self.cleaned_data.get('password'),
            email=self.cleaned_data.get('email')
        )
        return instance
Gimpneek/jobseek
jobseekr/cv/forms/user.py
Python
agpl-3.0
1,096
# -*- coding: utf-8 -*- """ Started on fri, dec 8th, 2017 @author: carlos.arana """ # Librerias utilizadas import pandas as pd import numpy as np import sys # Librerias locales utilizadas module_path = r'D:\PCCS\01_Dmine\Scripts' if module_path not in sys.path: sys.path.append(module_path) from SUN.asignar_sun import asignar_sun from VarInt.VarInt import VarInt from SUN_integridad.SUN_integridad import SUN_integridad from PCCS_variables.PCCS_variables import variables from ParametroEstandar.ParametroEstandar import ParametroEstandar from AsignarDimension.AsignarDimension import AsignarDimension from DocumentarParametro.DocumentarParametro import DocumentarParametro """ Las librerias locales utilizadas renglones arriba se encuentran disponibles en las siguientes direcciones: SCRIPT: | DISPONIBLE EN: ------ | ------------------------------------------------------------------------------------ asignar_sun | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/SUN VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt SUN_integridad | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/SUN_integridad variables | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/PCCS_variables ParametroEstandar | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/ParametroEstandar AsignarDimension | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/AsignarDimension DocumentarParametro | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/DocumentarParametro """ # Documentacion del Parametro --------------------------------------------------------------------------------------- # Descripciones del Parametro ClaveParametro = 'P0613' NombreParametro = 'Viviendas que utilizan gas para cocinar' DescParam = 'Porcentaje de viviendas particulares habitadas que utilizan gas para cocinar' UnidadesParam = 'Porcentaje' TituloParametro = 'VIV_GAS' # Para nombrar la columna del parametro PeriodoParam = '2015' DescVarIntegridad = 'La variable de integridad municipal para esta Dataset es binaria: \n' \ '1 = El municipio cuenta con informacion \n0 = El municipio no cuenta con información' # Descripciones del proceso de Minería nomarchivodataset = '08' ArchivoDataset = nomarchivodataset + '.xlsx' ContenidoHojaDatos = 'Datos disponibles por municipio para 2015, utilizados para la construcción del parametro' ClaveDataset = 'EI2015' ActDatos = '2015' Agregacion = 'Promedio del porcentaje de viviendas particulares habitadas que utilizan gas para cocinar en los ' \ 'municipios que componen una Ciudad del SUN. En la agregación de datos municipales a ciudades del SUN ' \ 'se han excluido los Municipos en los que la muestra de la Encuesta Intercensal fue clasificada como ' \ 'insuficiente.' 
# Descripciones generadas desde la clave del parámetro DirFuente = r'D:\PCCS\01_Dmine\Datasets\{}'.format(ClaveDataset) DSBase = '"{}.xlsx", disponible en ' \ 'https://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/{}'.format(ArchivoDataset, ClaveDataset) ClaveDimension = ClaveParametro[1:3] NomDimension = AsignarDimension(ClaveDimension)['nombre'] DirDimension = ClaveDimension + "_" + AsignarDimension(ClaveDimension)['directorio'] RepoMina = 'https://github.com/INECC-PCCS/01_Dmine/tree/master/{}/{}'.format(DirDimension, ClaveParametro) DirDestino = r'D:\PCCS\01_Dmine\{}'.format(ClaveDimension+"_"+AsignarDimension(ClaveDimension)['directorio']) # Cargar metadatos del dataset metadataset = pd.read_excel(DirFuente + '\\' + ArchivoDataset, sheetname="METADATOS") metadataset.set_index('Metadato', inplace=True) metadataset = metadataset['Descripcion'] # Descripciones generadas desde los metadatos del dataset. NomDataset = metadataset['Nombre del Dataset'] DescDataset = metadataset['Descripcion del dataset'] DispTemp = metadataset['Disponibilidad Temporal'] PeriodoAct = metadataset['Periodo de actualizacion'] DesagrMax = metadataset['Nivel de Desagregacion'] Notas = '' NomFuente = metadataset['Fuente'] UrlFuente = metadataset['URL_Fuente'] # Construccion del Parámetro ----------------------------------------------------------------------------------------- # Cargar dataset inicial dataset = pd.read_excel(DirFuente + '\\' + ArchivoDataset, sheetname=nomarchivodataset, dtype={'CVE_MUN': str}) dataset.set_index('CVE_MUN', inplace=True) # Generar datasets para parámetro y Variable de Integridad dataset = dataset[~dataset['Municipio'].str.contains('\*\*')] # Excluir municipios con ** muestra insuficiente colummas = ['Cocina_con_Gas'] dataset = dataset[colummas] par_dataset = dataset['Cocina_con_Gas'] # Construccion del Parámetro par_dataset = par_dataset.to_frame(name = ClaveParametro) par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo = 1) # Agregar datos por ciudad para parametro variables_SUN = ['CVE_MUN', 'NOM_MUN', 'CVE_SUN', 'NOM_SUN', 'NOM_ENT'] DatosLimpios = asignar_sun(par_dataset, vars=variables_SUN) OrdenColumnas = (variables_SUN + variables_dataset) DatosLimpios = DatosLimpios[OrdenColumnas] # Reordenar las columnas # Consolidar datos por ciudad para hoja_datos dataset.columns = [ClaveParametro+"_"+i for i in list(dataset)] var_disponibles = list(dataset) dataset['CVE_MUN'] = dataset.index hoja_datos = asignar_sun(dataset) hoja_datos = hoja_datos[(['CVE_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN'] + var_disponibles)].set_index('CVE_MUN') # Revision de integridad integridad_parametro = SUN_integridad(DatosLimpios) info_completa = sum(integridad_parametro['INTEGRIDAD']['INTEGRIDAD'] == 1) # Para generar grafico de integridad info_sin_info = sum(integridad_parametro['INTEGRIDAD']['INTEGRIDAD'] == 0) # Para generar grafico de integridad info_incomple = 135 - info_completa - info_sin_info # Para generar grafico de integridad # Construccion del Parametro param_dataset = DatosLimpios.set_index('CVE_SUN') param_dataset['CVE_SUN'] = param_dataset.index param = param_dataset.groupby(level=0).agg('mean')[ClaveParametro] # Agregacion por ciudad intparam = param_dataset.groupby(level=0).agg('mean')['VAR_INTEGRIDAD'] # Integridad por ciudad Tipo_Sun = integridad_parametro['EXISTENCIA']['TIPO_SUN'] Tipo_Sun = Tipo_Sun.groupby(Tipo_Sun.index).first() std_nomsun = param_dataset['CVE_SUN'].map(str)+' - '+param_dataset['NOM_SUN'] # Nombres estandar CVE_SUN + NOM_SUN 
std_nomsun.drop_duplicates(keep='first', inplace=True)

Parametro = pd.DataFrame()
Parametro['CIUDAD'] = std_nomsun
Parametro['TIPO_SUN'] = Tipo_Sun
Parametro[ClaveParametro] = param
Parametro['INTEGRIDAD'] = intparam
Parametro = Parametro.sort_index()

# Variable list
variables_locales = sorted(list(set(list(DatosLimpios) +
                                    list(dataset) +
                                    list(integridad_parametro['INTEGRIDAD']) +
                                    list(integridad_parametro['EXISTENCIA']) +
                                    list(Parametro))))
metavariables = variables(variables_locales)

# Metadata
d_parametro = {
    'DESCRIPCION DEL PARAMETRO': np.nan,
    'Clave': ClaveParametro,
    'Nombre del Parametro': NombreParametro,
    'Descripcion del Parametro': DescParam,
    'Periodo': PeriodoParam,
    'Unidades': UnidadesParam
}

d_hojas = {
    'METADATOS': 'Descripciones y notas relativas al Dataset',
    'PARAMETRO': 'Dataset resultado de la minería, agregado por clave del Sistema Urbano Nacional, '
                 'para utilizarse en la construcción de Indicadores',
    'DATOS': ContenidoHojaDatos,
    'INTEGRIDAD': 'Revision de integridad de la información POR CLAVE DEL SUN. '
                  'Promedio de VAR_INTEGRIDAD de los municipios que componen una ciudad. '
                  'Si no se tiene información para el municipio, VAR_INTEGRIDAD es igual a cero',
    'EXISTENCIA': 'Revision de integridad de la información POR MUNICIPIO.',
    ' ': np.nan,
    'DESCRIPCION DE VARIABLES': np.nan
}

d_mineria = {
    ' ': np.nan,
    'DESCRIPCION DEL PROCESO DE MINERIA:': np.nan,
    'Nombre del Dataset': NomDataset,
    'Descripcion del dataset': DescDataset,
    'Disponibilidad Temporal': DispTemp,
    'Periodo de actualizacion': PeriodoAct,
    'Nivel de Desagregacion': DesagrMax,
    'Notas': Notas,
    'Fuente': NomFuente,
    'URL_Fuente': UrlFuente,
    'Dataset base': DSBase,
    'Repositorio de mineria': RepoMina,
    'Método de Agregación': Agregacion,
    'VAR_INTEGRIDAD': DescVarIntegridad,
    '  ': np.nan,  # second blank separator row; two spaces so it does not collide with the blank key above
    'HOJAS INCLUIDAS EN EL LIBRO': np.nan
}

descripcion_parametro = pd.DataFrame.from_dict(d_parametro, orient='index').rename(columns={0: 'DESCRIPCION'})
descripcion_mineria = pd.DataFrame.from_dict(d_mineria, orient='index').rename(columns={0: 'DESCRIPCION'})
descripcion_hojas = pd.DataFrame.from_dict(d_hojas, orient='index').rename(columns={0: 'DESCRIPCION'})
MetaParametro = descripcion_parametro.append(descripcion_mineria).append(descripcion_hojas).append(metavariables)

# Descriptions dictionary
DescParametro = {
    'ClaveParametro': ClaveParametro,
    'NombreParametro': NombreParametro,
    'info_completa': info_completa,
    'info_sin_info': info_sin_info,
    'info_incomple': info_incomple,
    'RutaSalida': DirDestino,
    'Clave de Dimension': ClaveDimension,
    'Nombre de Dimension': NomDimension,
    'Titulo de Columna': TituloParametro,
    'Actualizacion de datos': ActDatos
}

# Create the Excel file and document the parameter
ParametroEstandar(DescParametro, MetaParametro, Parametro, DatosLimpios, integridad_parametro, hoja_datos)
DocumentarParametro(DescParametro, MetaParametro, Parametro)
Caranarq/01_Dmine
06_Energia/P0613/P0613.py
Python
gpl-3.0
9,853
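The comments in P0613.py above describe the city-level aggregation only in prose: municipal percentages are averaged into cities of the SUN, and the binary VAR_INTEGRIDAD flag is averaged the same way so that it becomes a coverage ratio per city. A minimal, self-contained pandas sketch of that groupby pattern follows; the rows and city keys are made up for illustration.

import pandas as pd

# Toy municipal table: SUN city key, parameter value, and the 0/1 integrity flag
municipios = pd.DataFrame({
    'CVE_SUN': ['01', '01', '02'],
    'P0613': [85.0, 91.0, 78.5],        # % of dwellings that cook with gas
    'VAR_INTEGRIDAD': [1, 1, 0],        # 1 = municipality has data, 0 = it does not
})

# Mean of the parameter and of the flag per city: the flag becomes the share
# of municipalities in each city that actually reported data
por_ciudad = municipios.groupby('CVE_SUN')[['P0613', 'VAR_INTEGRIDAD']].mean()
print(por_ciudad)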
# coding=utf-8 # Author: Nic Wolfe <[email protected]> # URL: https://sickrage.github.io # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function, unicode_literals import sickbeard from sickbeard import logger from sickrage.helper.exceptions import ex from six.moves import urllib try: import json except ImportError: import simplejson as json class Notifier(object): def _notify_emby(self, message, host=None, emby_apikey=None): """Handles notifying Emby host via HTTP API Returns: Returns True for no issue or False if there was an error """ # fill in omitted parameters if not host: host = sickbeard.EMBY_HOST if not emby_apikey: emby_apikey = sickbeard.EMBY_APIKEY url = 'http://{0}/emby/Notifications/Admin'.format(host) values = {'Name': 'SickRage', 'Description': message, 'ImageUrl': sickbeard.LOGO_URL} data = json.dumps(values) try: req = urllib.request.Request(url, data) req.add_header('X-MediaBrowser-Token', emby_apikey) req.add_header('Content-Type', 'application/json') response = urllib.request.urlopen(req) result = response.read() response.close() logger.log('EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG) return True except (urllib.error.URLError, IOError) as e: logger.log('EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING) return False ############################################################################## # Public functions ############################################################################## def test_notify(self, host, emby_apikey): return self._notify_emby('This is a test notification from SickRage', host, emby_apikey) def update_library(self, show=None): """Handles updating the Emby Media Server host via HTTP API Returns: Returns True for no issue or False if there was an error """ if sickbeard.USE_EMBY: if not sickbeard.EMBY_HOST: logger.log('EMBY: No host specified, check your settings', logger.DEBUG) return False if show: if show.indexer == 1: provider = 'tvdb' elif show.indexer == 2: logger.log('EMBY: TVRage Provider no longer valid', logger.WARNING) return False else: logger.log('EMBY: Provider unknown', logger.WARNING) return False query = '?{0}id={1}'.format(provider, show.indexerid) else: query = '' url = 'http://{0}/emby/Library/Series/Updated{1}'.format(sickbeard.EMBY_HOST, query) values = {} data = urllib.parse.urlencode(values) try: req = urllib.request.Request(url, data) req.add_header('X-MediaBrowser-Token', sickbeard.EMBY_APIKEY) response = urllib.request.urlopen(req) result = response.read() response.close() logger.log('EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG) return True except (urllib.error.URLError, IOError) as e: logger.log('EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING) return False
nopjmp/SickRage
sickbeard/notifiers/emby.py
Python
gpl-3.0
4,210
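The _notify_emby docstring above summarises the call only briefly: a JSON body is POSTed to the /emby/Notifications/Admin endpoint with the API key carried in the X-MediaBrowser-Token header. Stripped of the SickRage plumbing, a rough Python 3 sketch of the same request shape might look like this; host, api_key and message are placeholders, and the return convention is simplified.

import json
from urllib import request

def notify_emby(host, api_key, message):
    # Same request shape as the notifier above: POST JSON to /emby/Notifications/Admin
    # with the API key in the X-MediaBrowser-Token header.
    url = 'http://{0}/emby/Notifications/Admin'.format(host)
    body = json.dumps({'Name': 'SickRage', 'Description': message}).encode('utf-8')
    req = request.Request(url, data=body, headers={
        'X-MediaBrowser-Token': api_key,
        'Content-Type': 'application/json',
    })
    with request.urlopen(req, timeout=10) as resp:
        return 200 <= resp.status < 300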
#!/usr/bin/python import csv import os import sys #SOURCE_DIR="/vagrant/files/physionet-challenge-2012" SOURCE_DIR="/vagrant/files/datasource/physionet-challenge-2012" CONCEPT_FILE="/vagrant/files/datasource/physionet-challenge-2012/headeronly/concept.txt" #DEATH_FILE="/vagrant/files/datasource/physionet-challenge-2012/headeronly/Outcomes-a.txt" DEATH_FILE="%s/Outcomes-a.txt" % (SOURCE_DIR) TARGET_DIR="/home/c3po/physionet_data" #OUT_FILE="/home/c3po/ObsrvPhysionet.txt" OUT_FILE="/home/c3po/observation-merged.txt" conceptIdsMap={} def readConceptFile(): """ Read Concept files data and create a map of string and id index""" conceptFile = open(CONCEPT_FILE, 'rt') try: reader = csv.reader(conceptFile, delimiter=';') idx=0 for row in reader: if len(row) == 0 or idx == 0:#row is a list type idx=1 continue conceptIdsMap[row[1]]=int(row[0]) except Exception as error: print "Exception while reading concept data file : %s" % (error) sys.exit() finally: conceptFile.close() def processFilesData(): #print "In Process..." observationIndex=1600001 for input_file in os.listdir("%s/set-a" % TARGET_DIR): linesData="" patient_id=(input_file.split('.')[0]).strip() with open(("%s/set-a/%s" % (TARGET_DIR, input_file)), 'r') as f: for _ in xrange(7): next(f) for line in f: line.strip('\n') line.strip('\r') line.strip() obsTime,obsConcept,obsValue=line.split(',') obsHour, obsMin=obsTime.split(':') #If concept Id not found in concept file, ignore that if (conceptIdsMap.has_key(obsConcept.strip())): conceptId=conceptIdsMap[obsConcept.strip()] else: #print "Concept Id ' %s ' not found in concept.txt" % (obsConcept) continue #This is to convert date if hour value is more than 24 or 48 if (int(obsHour)>=24) and (int(obsHour) < 48): obsDate="01/02/14" actualHour=("%s" % (int(obsHour)-24)).zfill(2) obsTime="%s:%s" % (actualHour, obsMin) elif (int(obsHour)>= 48): obsDate="01/03/14" actualHour=("%s" % (int(obsHour)-48)).zfill(2) obsTime="%s:%s" % (actualHour, obsMin) else: obsDate="01/01/14" # This is to convert any exponential data to float and then int value if obsValue.find('e') != -1: obsExpData=int(float(obsValue)) obsValue=('%s'% obsExpData) linesData += "%d;%s;%s;%s;%s;%s;NULL;NULL;NULL;NULL;NULL;0;0;NULL;NULL;NULL;NULL\n" % (observationIndex, patient_id, conceptId, obsDate.strip(), obsTime.strip(), obsValue.strip()) observationIndex += 1 f.close() writeToFile(linesData) return observationIndex def writeToFile(lines): outfile=open(OUT_FILE, "a") outfile.write(lines) outfile.close() def createOutFileHeader(): """Creating file and Header line""" outfile=open(OUT_FILE, "w") header="observation_id;person_id;observation_concept_id;observation_date;" +\ "observation_time;value_as_number;value_as_string;value_as_concept_id;unit_concept_id;" +\ "range_low;range_high;observation_type_concept_id;associated_provider_id;visit_occurrence_id;" +\ "relevant_condition_concept_id;observation_source_value;units_source_value\n" outfile.write(header) outfile.close() def extractPhysionetZip(): try: os.mkdir(TARGET_DIR) except OSError: #print "Ignore" pass try: cmd="tar -zxf %s/*.gz -C %s" % (SOURCE_DIR, TARGET_DIR) rc=os.system(cmd) #print "rc is ", rc except Exception as e: print "Exception: ", e def copyToHadoop(): """Copying Observation file to Hadoop file system""" try: cmd="[[ -f /usr/bin/dos2unix ]] && dos2unix -l %s" % (OUT_FILE) rc=os.system(cmd) #print rc #cmd="hadoop fs -copyFromLocal %s /data/C3PO_CDW" % (OUT_FILE) cmd="hdfs dfs -copyFromLocal -f %s /data/C3PO_CDW" % (OUT_FILE) print "Copying from Local to Hadoop file 
system::: ", cmd rc=os.system(cmd) #print rc except Exception as e: print "Exception: ", e def processDeathFileData(index): observationIndex=index linesData="" print "Inside processing Death file" if os.path.isfile(DEATH_FILE) and os.access(DEATH_FILE, os.R_OK): print "File exists and is readable" deathFile = open(DEATH_FILE, 'rt') try: reader = csv.reader(deathFile, delimiter=',') idx=0 for row in reader: if len(row) == 0 or idx == 0:#row is a list type idx=1 continue patient_id=row[0] death_value=row[5] obsDate="01/01/14" obsTime="23:59" conceptId=43 linesData += "%d;%s;%s;%s;%s;%s;NULL;NULL;NULL;NULL;NULL;0;0;NULL;NULL;NULL;NULL\n" % (observationIndex, patient_id, conceptId, obsDate.strip(), obsTime.strip(), death_value.strip()) observationIndex += 1 writeToFile(linesData) except Exception as error: print "Exception while reading concept data file : %s" % (error) finally: deathFile.close() else: print "Either Death file is missing or is not readable" print "Death file not found, Not writing death info to Observation file" def main(argv): try: readConceptFile() extractPhysionetZip() createOutFileHeader() obsrvIndex=processFilesData() processDeathFileData(obsrvIndex) #copyToHadoop() except Exception as error: print "Exception: %s" % (error) if __name__ == "__main__": main(sys.argv[1:]) #OBSERVATION;observation_id;person_id;observation_concept_id;observation_date;observation_time;value_as_number;value_as_string;value_as_concept_id;unit_concept_id;range_low;range_high;observation_type_concept_id;associated_provider_id;visit_occurrence_id;relevant_condition_concept_id;observation_source_value;units_source_value #OBSERVATION;2676118;138312;16;01/01/14;06:08;4.9;NULL;NULL;NULL;NULL;NULL;0;0;NULL;NULL;NULL;NULL
Clinical3PO/Platform
dev/Clinical3PO-Vagrant/files/datasource/physionet-challenge-2012/etl/dataTransform.py
Python
apache-2.0
6,635
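The time handling in processFilesData above is explained only by its inline branching: PhysioNet set-a timestamps are HH:MM offsets from ICU admission over a roughly 48-hour window, so offsets of 24 hours or more roll over to a following calendar day. A compact sketch of that conversion is shown below; the function name is invented here and the three placeholder dates mirror the ones used in the script.

BASE_DATES = ('01/01/14', '01/02/14', '01/03/14')   # day 1, day 2, day 3 of the stay

def offset_to_date_time(obs_time):
    # '37:15' -> ('01/02/14', '13:15'): whole days move the date forward,
    # the remainder becomes the wall-clock time
    hour, minute = obs_time.split(':')
    day_index = int(hour) // 24
    return BASE_DATES[day_index], '{0:02d}:{1}'.format(int(hour) % 24, minute)

print(offset_to_date_time('37:15'))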
#!/usr/bin/env python # Copyright (c) 2014, Stanford University # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import sys # Command line tool to look up the human readable strings that # correspond to edX platform generated hash strings for problems and # videos. The given strings may be just the 32 bit hex numbers, or the # long strings in tracking logs that contain that hex number somewhere # inside it # Add json_to_relation source dir to $PATH # for duration of this execution: source_dir = [os.path.join(os.path.dirname(os.path.abspath(__file__)), "../json_to_relation/")] source_dir.extend(sys.path) sys.path = source_dir from modulestoreImporter import ModulestoreImporter from edxTrackLogJSONParser import EdXTrackLogJSONParser if __name__ == '__main__': USAGE = 'Usage: lookupOpenEdxHash.py hashStr1 hashstr2 ...' if len(sys.argv) < 2: print(USAGE) sys.exit() hashLookup = ModulestoreImporter(os.path.join(os.path.dirname(__file__),'../json_to_relation/data/modulestore_latest.json'), useCache=True) for hashStr in sys.argv[1:]: match = EdXTrackLogJSONParser.findHashPattern.search(hashStr) if match is not None: print(hashLookup.getDisplayName(match.group(1))) else: print 'None'
paepcke/json_to_relation
scripts/lookupOpenEdxHash.py
Python
bsd-3-clause
2,745
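The header comment of lookupOpenEdxHash.py above notes that the input may be either a bare hash or a longer tracking-log string that contains one, and the script relies on EdXTrackLogJSONParser.findHashPattern to extract it. The real pattern lives in that module; as a stand-in, a regex that grabs a 32-character hex run behaves similarly on toy input.

import re

# Stand-in for EdXTrackLogJSONParser.findHashPattern (not the actual pattern):
# find a 32-character hex run anywhere inside the given string.
hash_pattern = re.compile(r'([0-9a-f]{32})')

for candidate in ('0123456789abcdef0123456789abcdef',
                  'input_i4x-0123456789abcdef0123456789abcdef_2_1'):
    match = hash_pattern.search(candidate)
    print(match.group(1) if match else None)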
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # Copyright 2011 - 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import itertools import time import uuid import eventlet import greenlet from oslo.config import cfg from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import importutils from cinder.openstack.common import jsonutils from cinder.openstack.common import log as logging from cinder.openstack.common.rpc import amqp as rpc_amqp from cinder.openstack.common.rpc import common as rpc_common qpid_messaging = importutils.try_import("qpid.messaging") qpid_exceptions = importutils.try_import("qpid.messaging.exceptions") LOG = logging.getLogger(__name__) qpid_opts = [ cfg.StrOpt('qpid_hostname', default='localhost', help='Qpid broker hostname'), cfg.IntOpt('qpid_port', default=5672, help='Qpid broker port'), cfg.ListOpt('qpid_hosts', default=['$qpid_hostname:$qpid_port'], help='Qpid HA cluster host:port pairs'), cfg.StrOpt('qpid_username', default='', help='Username for qpid connection'), cfg.StrOpt('qpid_password', default='', help='Password for qpid connection', secret=True), cfg.StrOpt('qpid_sasl_mechanisms', default='', help='Space separated list of SASL mechanisms to use for auth'), cfg.IntOpt('qpid_heartbeat', default=60, help='Seconds between connection keepalive heartbeats'), cfg.StrOpt('qpid_protocol', default='tcp', help="Transport to use, either 'tcp' or 'ssl'"), cfg.BoolOpt('qpid_tcp_nodelay', default=True, help='Disable Nagle algorithm'), # NOTE(russellb) If any additional versions are added (beyond 1 and 2), # this file could probably use some additional refactoring so that the # differences between each version are split into different classes. cfg.IntOpt('qpid_topology_version', default=1, help="The qpid topology version to use. Version 1 is what " "was originally used by impl_qpid. Version 2 includes " "some backwards-incompatible changes that allow broker " "federation to work. Users should update to version 2 " "when they are able to take everything down, as it " "requires a clean break."), ] cfg.CONF.register_opts(qpid_opts) def raise_invalid_topology_version(conf): msg = (_("Invalid value for qpid_topology_version: %d") % conf.qpid_topology_version) LOG.error(msg) raise Exception(msg) class ConsumerBase(object): """Consumer base class.""" def __init__(self, conf, session, callback, node_name, node_opts, link_name, link_opts): """Declare a queue on an amqp session. 'session' is the amqp session to use 'callback' is the callback to call when messages are received 'node_name' is the first part of the Qpid address string, before ';' 'node_opts' will be applied to the "x-declare" section of "node" in the address string. 'link_name' goes into the "name" field of the "link" in the address string 'link_opts' will be applied to the "x-declare" section of "link" in the address string. 
""" self.callback = callback self.receiver = None self.session = None if conf.qpid_topology_version == 1: addr_opts = { "create": "always", "node": { "type": "topic", "x-declare": { "durable": True, "auto-delete": True, }, }, "link": { "name": link_name, "durable": True, "x-declare": { "durable": False, "auto-delete": True, "exclusive": False, }, }, } addr_opts["node"]["x-declare"].update(node_opts) elif conf.qpid_topology_version == 2: addr_opts = { "link": { "x-declare": { "auto-delete": True, }, }, } else: raise_invalid_topology_version() addr_opts["link"]["x-declare"].update(link_opts) self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) self.reconnect(session) def reconnect(self, session): """Re-declare the receiver after a qpid reconnect""" self.session = session self.receiver = session.receiver(self.address) self.receiver.capacity = 1 def consume(self): """Fetch the message and pass it to the callback object""" message = self.receiver.fetch() try: msg = rpc_common.deserialize_msg(message.content) self.callback(msg) except Exception: LOG.exception(_("Failed to process message... skipping it.")) finally: self.session.acknowledge(message) def get_receiver(self): return self.receiver class DirectConsumer(ConsumerBase): """Queue/consumer class for 'direct'""" def __init__(self, conf, session, msg_id, callback): """Init a 'direct' queue. 'session' is the amqp session to use 'msg_id' is the msg_id to listen on 'callback' is the callback to call when messages are received """ link_opts = { "auto-delete": conf.amqp_auto_delete, "exclusive": True, "durable": conf.amqp_durable_queues, } if conf.qpid_topology_version == 1: node_name = "%s/%s" % (msg_id, msg_id) node_opts = {"type": "direct"} elif conf.qpid_topology_version == 2: node_name = "amq.direct/%s" % msg_id node_opts = {} else: raise_invalid_topology_version() super(DirectConsumer, self).__init__(conf, session, callback, node_name, node_opts, msg_id, link_opts) class TopicConsumer(ConsumerBase): """Consumer class for 'topic'""" def __init__(self, conf, session, topic, callback, name=None, exchange_name=None): """Init a 'topic' queue. :param session: the amqp session to use :param topic: is the topic to listen on :paramtype topic: str :param callback: the callback to call when messages are received :param name: optional queue name, defaults to topic """ exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf) link_opts = { "auto-delete": conf.amqp_auto_delete, "durable": conf.amqp_durable_queues, } if conf.qpid_topology_version == 1: node_name = "%s/%s" % (exchange_name, topic) elif conf.qpid_topology_version == 2: node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) else: raise_invalid_topology_version() super(TopicConsumer, self).__init__(conf, session, callback, node_name, {}, name or topic, link_opts) class FanoutConsumer(ConsumerBase): """Consumer class for 'fanout'""" def __init__(self, conf, session, topic, callback): """Init a 'fanout' queue. 
'session' is the amqp session to use 'topic' is the topic to listen on 'callback' is the callback to call when messages are received """ link_opts = {"exclusive": True} if conf.qpid_topology_version == 1: node_name = "%s_fanout" % topic node_opts = {"durable": False, "type": "fanout"} link_name = "%s_fanout_%s" % (topic, uuid.uuid4().hex) elif conf.qpid_topology_version == 2: node_name = "amq.topic/fanout/%s" % topic node_opts = {} link_name = "" else: raise_invalid_topology_version() super(FanoutConsumer, self).__init__(conf, session, callback, node_name, node_opts, link_name, link_opts) class Publisher(object): """Base Publisher class""" def __init__(self, conf, session, node_name, node_opts=None): """Init the Publisher class with the exchange_name, routing_key, and other options """ self.sender = None self.session = session if conf.qpid_topology_version == 1: addr_opts = { "create": "always", "node": { "type": "topic", "x-declare": { "durable": False, # auto-delete isn't implemented for exchanges in qpid, # but put in here anyway "auto-delete": True, }, }, } if node_opts: addr_opts["node"]["x-declare"].update(node_opts) self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts)) elif conf.qpid_topology_version == 2: self.address = node_name else: raise_invalid_topology_version() self.reconnect(session) def reconnect(self, session): """Re-establish the Sender after a reconnection""" self.sender = session.sender(self.address) def send(self, msg): """Send a message""" self.sender.send(msg) class DirectPublisher(Publisher): """Publisher class for 'direct'""" def __init__(self, conf, session, msg_id): """Init a 'direct' publisher.""" if conf.qpid_topology_version == 1: node_name = msg_id node_opts = {"type": "direct"} elif conf.qpid_topology_version == 2: node_name = "amq.direct/%s" % msg_id node_opts = {} else: raise_invalid_topology_version() super(DirectPublisher, self).__init__(conf, session, node_name, node_opts) class TopicPublisher(Publisher): """Publisher class for 'topic'""" def __init__(self, conf, session, topic): """init a 'topic' publisher. """ exchange_name = rpc_amqp.get_control_exchange(conf) if conf.qpid_topology_version == 1: node_name = "%s/%s" % (exchange_name, topic) elif conf.qpid_topology_version == 2: node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) else: raise_invalid_topology_version() super(TopicPublisher, self).__init__(conf, session, node_name) class FanoutPublisher(Publisher): """Publisher class for 'fanout'""" def __init__(self, conf, session, topic): """init a 'fanout' publisher. """ if conf.qpid_topology_version == 1: node_name = "%s_fanout" % topic node_opts = {"type": "fanout"} elif conf.qpid_topology_version == 2: node_name = "amq.topic/fanout/%s" % topic node_opts = {} else: raise_invalid_topology_version() super(FanoutPublisher, self).__init__(conf, session, node_name, node_opts) class NotifyPublisher(Publisher): """Publisher class for notifications""" def __init__(self, conf, session, topic): """init a 'topic' publisher. 
""" exchange_name = rpc_amqp.get_control_exchange(conf) node_opts = {"durable": True} if conf.qpid_topology_version == 1: node_name = "%s/%s" % (exchange_name, topic) elif conf.qpid_topology_version == 2: node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic) else: raise_invalid_topology_version() super(NotifyPublisher, self).__init__(conf, session, node_name, node_opts) class Connection(object): """Connection object.""" pool = None def __init__(self, conf, server_params=None): if not qpid_messaging: raise ImportError("Failed to import qpid.messaging") self.session = None self.consumers = {} self.consumer_thread = None self.proxy_callbacks = [] self.conf = conf if server_params and 'hostname' in server_params: # NOTE(russellb) This enables support for cast_to_server. server_params['qpid_hosts'] = [ '%s:%d' % (server_params['hostname'], server_params.get('port', 5672)) ] params = { 'qpid_hosts': self.conf.qpid_hosts, 'username': self.conf.qpid_username, 'password': self.conf.qpid_password, } params.update(server_params or {}) self.brokers = params['qpid_hosts'] self.username = params['username'] self.password = params['password'] self.connection_create(self.brokers[0]) self.reconnect() def connection_create(self, broker): # Create the connection - this does not open the connection self.connection = qpid_messaging.Connection(broker) # Check if flags are set and if so set them for the connection # before we call open self.connection.username = self.username self.connection.password = self.password self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms # Reconnection is done by self.reconnect() self.connection.reconnect = False self.connection.heartbeat = self.conf.qpid_heartbeat self.connection.transport = self.conf.qpid_protocol self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay def _register_consumer(self, consumer): self.consumers[str(consumer.get_receiver())] = consumer def _lookup_consumer(self, receiver): return self.consumers[str(receiver)] def reconnect(self): """Handles reconnecting and re-establishing sessions and queues""" attempt = 0 delay = 1 while True: # Close the session if necessary if self.connection.opened(): try: self.connection.close() except qpid_exceptions.ConnectionError: pass broker = self.brokers[attempt % len(self.brokers)] attempt += 1 try: self.connection_create(broker) self.connection.open() except qpid_exceptions.ConnectionError, e: msg_dict = dict(e=e, delay=delay) msg = _("Unable to connect to AMQP server: %(e)s. 
" "Sleeping %(delay)s seconds") % msg_dict LOG.error(msg) time.sleep(delay) delay = min(2 * delay, 60) else: LOG.info(_('Connected to AMQP server on %s'), broker) break self.session = self.connection.session() if self.consumers: consumers = self.consumers self.consumers = {} for consumer in consumers.itervalues(): consumer.reconnect(self.session) self._register_consumer(consumer) LOG.debug(_("Re-established AMQP queues")) def ensure(self, error_callback, method, *args, **kwargs): while True: try: return method(*args, **kwargs) except (qpid_exceptions.Empty, qpid_exceptions.ConnectionError), e: if error_callback: error_callback(e) self.reconnect() def close(self): """Close/release this connection""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.connection.close() self.connection = None def reset(self): """Reset a connection so it can be used again""" self.cancel_consumer_thread() self.wait_on_proxy_callbacks() self.session.close() self.session = self.connection.session() self.consumers = {} def declare_consumer(self, consumer_cls, topic, callback): """Create a Consumer using the class that was passed in and add it to our list of consumers """ def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.error(_("Failed to declare consumer for topic '%(topic)s': " "%(err_str)s") % log_info) def _declare_consumer(): consumer = consumer_cls(self.conf, self.session, topic, callback) self._register_consumer(consumer) return consumer return self.ensure(_connect_error, _declare_consumer) def iterconsume(self, limit=None, timeout=None): """Return an iterator that will consume from all queues/consumers""" def _error_callback(exc): if isinstance(exc, qpid_exceptions.Empty): LOG.debug(_('Timed out waiting for RPC response: %s') % str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % str(exc)) def _consume(): nxt_receiver = self.session.next_receiver(timeout=timeout) try: self._lookup_consumer(nxt_receiver).consume() except Exception: LOG.exception(_("Error processing message. Skipping it.")) for iteration in itertools.count(0): if limit and iteration >= limit: raise StopIteration yield self.ensure(_error_callback, _consume) def cancel_consumer_thread(self): """Cancel a consumer thread""" if self.consumer_thread is not None: self.consumer_thread.kill() try: self.consumer_thread.wait() except greenlet.GreenletExit: pass self.consumer_thread = None def wait_on_proxy_callbacks(self): """Wait for all proxy callback threads to exit.""" for proxy_cb in self.proxy_callbacks: proxy_cb.wait() def publisher_send(self, cls, topic, msg): """Send to a publisher based on the publisher class""" def _connect_error(exc): log_info = {'topic': topic, 'err_str': str(exc)} LOG.exception(_("Failed to publish message to topic " "'%(topic)s': %(err_str)s") % log_info) def _publisher_send(): publisher = cls(self.conf, self.session, topic) publisher.send(msg) return self.ensure(_connect_error, _publisher_send) def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. 
In nova's use, this is generally a msg_id queue used for responses for call/multicall """ self.declare_consumer(DirectConsumer, topic, callback) def declare_topic_consumer(self, topic, callback=None, queue_name=None, exchange_name=None): """Create a 'topic' consumer.""" self.declare_consumer(functools.partial(TopicConsumer, name=queue_name, exchange_name=exchange_name, ), topic, callback) def declare_fanout_consumer(self, topic, callback): """Create a 'fanout' consumer""" self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" self.publisher_send(DirectPublisher, msg_id, msg) def topic_send(self, topic, msg, timeout=None): """Send a 'topic' message""" # # We want to create a message with attributes, e.g. a TTL. We # don't really need to keep 'msg' in its JSON format any longer # so let's create an actual qpid message here and get some # value-add on the go. # # WARNING: Request timeout happens to be in the same units as # qpid's TTL (seconds). If this changes in the future, then this # will need to be altered accordingly. # qpid_message = qpid_messaging.Message(content=msg, ttl=timeout) self.publisher_send(TopicPublisher, topic, qpid_message) def fanout_send(self, topic, msg): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) def notify_send(self, topic, msg, **kwargs): """Send a notify message on a topic""" self.publisher_send(NotifyPublisher, topic, msg) def consume(self, limit=None): """Consume from all queues/consumers""" it = self.iterconsume(limit=limit) while True: try: it.next() except StopIteration: return def consume_in_thread(self): """Consumer from all queues/consumers in a greenthread""" def _consumer_thread(): try: self.consume() except greenlet.GreenletExit: return if self.consumer_thread is None: self.consumer_thread = eventlet.spawn(_consumer_thread) return self.consumer_thread def create_consumer(self, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) self.proxy_callbacks.append(proxy_cb) if fanout: consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb) else: consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb) self._register_consumer(consumer) return consumer def create_worker(self, topic, proxy, pool_name): """Create a worker that calls a method in a proxy object""" proxy_cb = rpc_amqp.ProxyCallback( self.conf, proxy, rpc_amqp.get_connection_pool(self.conf, Connection)) self.proxy_callbacks.append(proxy_cb) consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb, name=pool_name) self._register_consumer(consumer) return consumer def join_consumer_pool(self, callback, pool_name, topic, exchange_name=None): """Register as a member of a group of consumers for a given topic from the specified exchange. Exactly one member of a given pool will receive each message. A message will be delivered to multiple pools, if more than one is created. 
""" callback_wrapper = rpc_amqp.CallbackWrapper( conf=self.conf, callback=callback, connection_pool=rpc_amqp.get_connection_pool(self.conf, Connection), ) self.proxy_callbacks.append(callback_wrapper) consumer = TopicConsumer(conf=self.conf, session=self.session, topic=topic, callback=callback_wrapper, name=pool_name, exchange_name=exchange_name) self._register_consumer(consumer) return consumer def create_connection(conf, new=True): """Create a connection""" return rpc_amqp.create_connection( conf, new, rpc_amqp.get_connection_pool(conf, Connection)) def multicall(conf, context, topic, msg, timeout=None): """Make a call that returns multiple times.""" return rpc_amqp.multicall( conf, context, topic, msg, timeout, rpc_amqp.get_connection_pool(conf, Connection)) def call(conf, context, topic, msg, timeout=None): """Sends a message on a topic and wait for a response.""" return rpc_amqp.call( conf, context, topic, msg, timeout, rpc_amqp.get_connection_pool(conf, Connection)) def cast(conf, context, topic, msg): """Sends a message on a topic without waiting for a response.""" return rpc_amqp.cast( conf, context, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def fanout_cast(conf, context, topic, msg): """Sends a message on a fanout exchange without waiting for a response.""" return rpc_amqp.fanout_cast( conf, context, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def cast_to_server(conf, context, server_params, topic, msg): """Sends a message on a topic to a specific server.""" return rpc_amqp.cast_to_server( conf, context, server_params, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def fanout_cast_to_server(conf, context, server_params, topic, msg): """Sends a message on a fanout exchange to a specific server.""" return rpc_amqp.fanout_cast_to_server( conf, context, server_params, topic, msg, rpc_amqp.get_connection_pool(conf, Connection)) def notify(conf, context, topic, msg, envelope): """Sends a notification event on a topic.""" return rpc_amqp.notify(conf, context, topic, msg, rpc_amqp.get_connection_pool(conf, Connection), envelope) def cleanup(): return rpc_amqp.cleanup(Connection.pool)
ntt-sic/cinder
cinder/openstack/common/rpc/impl_qpid.py
Python
apache-2.0
26,814
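The ConsumerBase docstring in impl_qpid.py above describes the qpid address string only in words: a node name, then a JSON blob whose "node" and "link" sections carry x-declare options, joined by " ; ". A small dependency-free sketch of how a topology-version-1 topic address could be assembled is given below, using only the base options shown in ConsumerBase and none of the config-driven overrides.

import json

def make_topic_address(exchange_name, topic, queue_name):
    # Version-1 layout from ConsumerBase: "<node_name> ; <json options>"
    opts = {
        'create': 'always',
        'node': {
            'type': 'topic',
            'x-declare': {'durable': True, 'auto-delete': True},
        },
        'link': {
            'name': queue_name,
            'durable': True,
            'x-declare': {'durable': False, 'auto-delete': True, 'exclusive': False},
        },
    }
    node_name = '%s/%s' % (exchange_name, topic)
    return '%s ; %s' % (node_name, json.dumps(opts))

print(make_topic_address('cinder', 'volume', 'volume'))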
# coding=utf-8 import glob, hashlib, os, re, shutil, subprocess, sys from textwrap import dedent import tools.shared from tools.shared import * from tools.line_endings import check_line_endings from runner import RunnerCore, path_from_root, checked_sanity, test_modes, get_zlib_library, get_bullet_library class T(RunnerCore): # Short name, to make it more fun to use manually on the commandline def is_emterpreter(self): return 'EMTERPRETIFY=1' in self.emcc_args def test_hello_world(self): test_path = path_from_root('tests', 'core', 'test_hello_world') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) src = open(self.in_dir('src.cpp.o.js')).read() assert 'EMSCRIPTEN_GENERATED_FUNCTIONS' not in src, 'must not emit this unneeded internal thing' def test_intvars(self): test_path = path_from_root('tests', 'core', 'test_intvars') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sintvars(self): test_path = path_from_root('tests', 'core', 'test_sintvars') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, force_c=True) def test_i64(self): src = ''' #include <stdio.h> int main() { long long a = 0x2b00505c10; long long b = a >> 29; long long c = a >> 32; long long d = a >> 34; printf("*%Ld,%Ld,%Ld,%Ld*\\n", a, b, c, d); unsigned long long ua = 0x2b00505c10; unsigned long long ub = ua >> 29; unsigned long long uc = ua >> 32; unsigned long long ud = ua >> 34; printf("*%Ld,%Ld,%Ld,%Ld*\\n", ua, ub, uc, ud); long long x = 0x0000def123450789ULL; // any bigger than this, and we long long y = 0x00020ef123456089ULL; // start to run into the double precision limit! printf("*%Ld,%Ld,%Ld,%Ld,%Ld*\\n", x, y, x | y, x & y, x ^ y, x >> 2, y << 2); printf("*"); long long z = 13; int n = 0; while (z > 1) { printf("%.2f,", (float)z); // these must be integers! z = z >> 1; n++; } printf("*%d*\\n", n); return 0; } ''' self.do_run(src, '*184688860176,344,43,10*\n*184688860176,344,43,10*\n*245127260211081,579378795077769,808077213656969,16428841631881,791648372025088*\n*13.00,6.00,3.00,*3*') src = r''' #include <time.h> #include <stdio.h> #include <stdint.h> int64_t returner1() { return 0x0000def123450789ULL; } int64_t returner2(int test) { while (test > 10) test /= 2; // confuse the compiler so it doesn't eliminate this function return test > 5 ? 0x0000def123450123ULL : 0ULL; } void modifier1(int64_t t) { t |= 12; printf("m1: %Ld\n", t); } void modifier2(int64_t &t) { t |= 12; } int truthy() { int x = time(0); while (x > 10) { x |= 7; x /= 2; } return x < 3; } struct IUB { int c; long long d; }; IUB iub[] = { { 55, 17179869201 }, { 122, 25769803837 }, }; int main(int argc, char **argv) { int64_t x1 = 0x1234def123450789ULL; int64_t x2 = 0x1234def123450788ULL; int64_t x3 = 0x1234def123450789ULL; printf("*%Ld\n%d,%d,%d,%d,%d\n%d,%d,%d,%d,%d*\n", x1, x1==x2, x1<x2, x1<=x2, x1>x2, x1>=x2, // note: some rounding in the printing! 
x1==x3, x1<x3, x1<=x3, x1>x3, x1>=x3); printf("*%Ld*\n", returner1()); printf("*%Ld*\n", returner2(30)); uint64_t maxx = -1ULL; printf("*%Lu*\n*%Lu*\n", maxx, maxx >> 5); // Make sure params are not modified if they shouldn't be int64_t t = 123; modifier1(t); printf("*%Ld*\n", t); modifier2(t); printf("*%Ld*\n", t); // global structs with i64s printf("*%d,%Ld*\n*%d,%Ld*\n", iub[0].c, iub[0].d, iub[1].c, iub[1].d); // Bitshifts { int64_t a = -1; int64_t b = a >> 29; int64_t c = a >> 32; int64_t d = a >> 34; printf("*%Ld,%Ld,%Ld,%Ld*\n", a, b, c, d); uint64_t ua = -1; int64_t ub = ua >> 29; int64_t uc = ua >> 32; int64_t ud = ua >> 34; printf("*%Ld,%Ld,%Ld,%Ld*\n", ua, ub, uc, ud); } // Nonconstant bitshifts { int64_t a = -1; int64_t b = a >> (29 - argc + 1); int64_t c = a >> (32 - argc + 1); int64_t d = a >> (34 - argc + 1); printf("*%Ld,%Ld,%Ld,%Ld*\n", a, b, c, d); uint64_t ua = -1; int64_t ub = ua >> (29 - argc + 1); int64_t uc = ua >> (32 - argc + 1); int64_t ud = ua >> (34 - argc + 1); printf("*%Ld,%Ld,%Ld,%Ld*\n", ua, ub, uc, ud); } // Math mixtures with doubles { uint64_t a = 5; double b = 6.8; uint64_t c = a * b; if (truthy()) printf("*%d,%d,%d*\n", (int)&a, (int)&b, (int)&c); // printing addresses prevents optimizations printf("*prod:%llu*\n", c); } // Basic (rounded, for now) math. Just check compilation. int64_t a = 0x1234def123450789ULL; a--; if (truthy()) a--; // confuse optimizer int64_t b = 0x1234000000450789ULL; b++; if (truthy()) b--; // confuse optimizer printf("*%Ld,%Ld,%Ld,%Ld*\n", (a+b)/5000, (a-b)/5000, (a*3)/5000, (a/5)/5000); a -= 17; if (truthy()) a += 5; // confuse optimizer b -= 17; if (truthy()) b += 121; // confuse optimizer printf("*%Lx,%Lx,%Lx,%Lx*\n", b - a, b - a/2, b/2 - a, b - 20); if (truthy()) a += 5/b; // confuse optimizer if (truthy()) b += 121*(3+a/b); // confuse optimizer printf("*%Lx,%Lx,%Lx,%Lx*\n", a - b, a - b/2, a/2 - b, a - 20); return 0; } ''' self.do_run(src, '*1311918518731868041\n' + '0,0,0,1,1\n' + '1,0,1,0,1*\n' + '*245127260211081*\n' + '*245127260209443*\n' + '*18446744073709551615*\n' + '*576460752303423487*\n' + 'm1: 127\n' + '*123*\n' + '*127*\n' + '*55,17179869201*\n' + '*122,25769803837*\n' + '*-1,-1,-1,-1*\n' + '*-1,34359738367,4294967295,1073741823*\n' + '*-1,-1,-1,-1*\n' + '*-1,34359738367,4294967295,1073741823*\n' + '*prod:34*\n' + '*524718382041609,49025451137,787151111239120,52476740749274*\n' + '*ffff210edd000002,91990876ea283be,f6e5210edcdd7c45,1234000000450765*\n' + '*def122fffffe,91adef1232283bb,f6e66f78915d7c42,1234def123450763*\n') src = r''' #include <stdio.h> #include <limits> int main() { long long i,j,k; i = 0; j = -1, k = 1; printf( "*\n" ); printf( "%s\n", i > j ? "Ok": "Fail" ); printf( "%s\n", k > i ? "Ok": "Fail" ); printf( "%s\n", k > j ? "Ok": "Fail" ); printf( "%s\n", i < j ? "Fail": "Ok" ); printf( "%s\n", k < i ? "Fail": "Ok" ); printf( "%s\n", k < j ? "Fail": "Ok" ); printf( "%s\n", (i-j) >= k ? "Ok": "Fail" ); printf( "%s\n", (i-j) <= k ? "Ok": "Fail" ); printf( "%s\n", i > std::numeric_limits<long long>::min() ? "Ok": "Fail" ); printf( "%s\n", i < std::numeric_limits<long long>::max() ? 
"Ok": "Fail" ); printf( "*\n" ); } ''' self.do_run(src, '*\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\n*') # stuff that also needs sign corrections src = r''' #include <stdio.h> #include <stdint.h> int main() { // i32 vs i64 int32_t small = -1; int64_t large = -1; printf("*%d*\n", small == large); small++; printf("*%d*\n", small == large); uint32_t usmall = -1; uint64_t ularge = -1; printf("*%d*\n", usmall == ularge); return 0; } ''' self.do_run(src, '*1*\n*0*\n*0*\n') def test_i64_b(self): test_path = path_from_root('tests', 'core', 'test_i64_b') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_cmp(self): test_path = path_from_root('tests', 'core', 'test_i64_cmp') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_cmp2(self): test_path = path_from_root('tests', 'core', 'test_i64_cmp2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_double(self): test_path = path_from_root('tests', 'core', 'test_i64_double') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_umul(self): test_path = path_from_root('tests', 'core', 'test_i64_umul') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_precise(self): src = r''' #include <inttypes.h> #include <stdio.h> int main() { uint64_t x = 0, y = 0; for (int i = 0; i < 64; i++) { x += 1ULL << i; y += x; x /= 3; y *= 5; printf("unsigned %d: %llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n", i, x, y, x+y, x-y, x*y, y ? x/y : 0, x ? y/x : 0, y ? x%y : 0, x ? y%x : 0); } int64_t x2 = 0, y2 = 0; for (int i = 0; i < 64; i++) { x2 += 1LL << i; y2 += x2; x2 /= 3 * (i % 7 ? -1 : 1); y2 *= 5 * (i % 2 ? -1 : 1); printf("signed %d: %lld,%lld,%lld,%lld,%lld,%lld,%lld,%lld,%lld\n", i, x2, y2, x2+y2, x2-y2, x2*y2, y2 ? x2/y2 : 0, x2 ? y2/x2 : 0, y2 ? x2%y2 : 0, x2 ? y2%x2 : 0); } return 0; } ''' self.do_run(src, open(path_from_root('tests', 'i64_precise.txt')).read()) # Verify that even if we ask for precision, if it is not needed it is not included Settings.PRECISE_I64_MATH = 1 src = ''' #include <inttypes.h> #include <stdio.h> int main(int argc, char **argv) { uint64_t x = 2125299906845564, y = 1225891506842664; if (argc == 12) { x = x >> 1; y = y >> 1; } x = x & 12ULL; y = y | 12ULL; x = x ^ y; x <<= 2; y >>= 3; printf("*%llu, %llu*\\n", x, y); } ''' self.do_run(src, '*4903566027370624, 153236438355333*') code = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read() assert 'goog.math.Long' not in code, 'i64 precise math should not have been included if not actually used' # But if we force it to be included, it is. 
First, a case where we don't need it Settings.PRECISE_I64_MATH = 2 self.do_run(open(path_from_root('tests', 'hello_world.c')).read(), 'hello') code = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read() assert 'goog.math.Long' in code, 'i64 precise math should be included if forced' # and now one where we do self.do_run(r''' #include <stdio.h> int main( int argc, char ** argv ) { unsigned long a = 0x60DD1695U; unsigned long b = 0xCA8C4E7BU; unsigned long long c = (unsigned long long)a * b; printf( "c = %016llx\n", c ); return 0; } ''', 'c = 4ca38a6bd2973f97') def test_i64_llabs(self): Settings.PRECISE_I64_MATH = 2 test_path = path_from_root('tests', 'core', 'test_i64_llabs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_zextneg(self): test_path = path_from_root('tests', 'core', 'test_i64_zextneg') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_7z(self): test_path = path_from_root('tests', 'core', 'test_i64_7z') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, ['hallo']) def test_i64_i16(self): test_path = path_from_root('tests', 'core', 'test_i64_i16') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_qdouble(self): test_path = path_from_root('tests', 'core', 'test_i64_qdouble') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i64_varargs(self): test_path = path_from_root('tests', 'core', 'test_i64_varargs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, 'waka fleefl asdfasdfasdfasdf'.split(' ')) def test_llvm_fabs(self): Settings.PRECISE_F32 = 1 test_path = path_from_root('tests', 'core', 'test_llvm_fabs') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def test_double_varargs(self): test_path = path_from_root('tests', 'core', 'test_double_varargs') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def test_struct_varargs(self): test_path = path_from_root('tests', 'core', 'test_struct_varargs') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def zzztest_nested_struct_varargs(self): test_path = path_from_root('tests', 'core', 'test_nested_struct_varargs') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def test_i32_mul_precise(self): test_path = path_from_root('tests', 'core', 'test_i32_mul_precise') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_i16_emcc_intrinsic(self): test_path = path_from_root('tests', 'core', 'test_i16_emcc_intrinsic') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_double_i64_conversion(self): test_path = path_from_root('tests', 'core', 'test_double_i64_conversion') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_float32_precise(self): Settings.PRECISE_F32 = 1 test_path = path_from_root('tests', 'core', 'test_float32_precise') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_negative_zero(self): test_path = path_from_root('tests', 'core', 'test_negative_zero') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_line_endings(self): 
self.build(open(path_from_root('tests', 'hello_world.cpp')).read(), self.get_dir(), self.in_dir('hello_world.cpp')) def test_literal_negative_zero(self): test_path = path_from_root('tests', 'core', 'test_literal_negative_zero') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_llvm_intrinsics(self): Settings.PRECISE_I64_MATH = 2 # for bswap64 test_path = path_from_root('tests', 'core', 'test_llvm_intrinsics') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_bswap64(self): test_path = path_from_root('tests', 'core', 'test_bswap64') src, output = (test_path + s for s in ('.in', '.out')) # extra coverages for emulate_casts in [0, 1]: for emulate_fps in [0, 1]: print emulate_casts, emulate_fps Settings.EMULATE_FUNCTION_POINTER_CASTS = emulate_casts Settings.EMULATED_FUNCTION_POINTERS = emulate_fps self.do_run_from_file(src, output) def test_sha1(self): self.do_run(open(path_from_root('tests', 'sha1.c')).read(), 'SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6') def test_asmjs_unknown_emscripten(self): # No other configuration is supported, so always run this. self.do_run(open(path_from_root('tests', 'asmjs-unknown-emscripten.c')).read(), '') def test_cube2md5(self): self.emcc_args += ['--embed-file', 'cube2md5.txt'] shutil.copyfile(path_from_root('tests', 'cube2md5.txt'), os.path.join(self.get_dir(), 'cube2md5.txt')) self.do_run(open(path_from_root('tests', 'cube2md5.cpp')).read(), open(path_from_root('tests', 'cube2md5.ok')).read()) def test_cube2hash(self): # extra testing for various codegen modes try: old_chunk_size = os.environ.get('EMSCRIPT_MAX_CHUNK_SIZE') or '' for chunk_size in ['1', old_chunk_size]: # test splitting out each function to a chunk in emscripten.py (21 functions here) print ' chunks', chunk_size os.environ['EMSCRIPT_MAX_CHUNK_SIZE'] = chunk_size # A good test of i64 math self.do_run('', 'Usage: hashstring <seed>', libraries=self.get_library('cube2hash', ['cube2hash.bc'], configure=None), includes=[path_from_root('tests', 'cube2hash')]) for text, output in [('fleefl', '892BDB6FD3F62E863D63DA55851700FDE3ACF30204798CE9'), ('fleefl2', 'AA2CC5F96FC9D540CA24FDAF1F71E2942753DB83E8A81B61'), ('64bitisslow', '64D8470573635EC354FEE7B7F87C566FCAF1EFB491041670')]: self.do_run('', 'hash value: ' + output, [text], no_build=True) finally: os.environ['EMSCRIPT_MAX_CHUNK_SIZE'] = old_chunk_size def test_unaligned(self): return self.skip('LLVM marks the reads of s as fully aligned, making this test invalid') src = r''' #include<stdio.h> struct S { double x; int y; }; int main() { // the 64-bit value here will not be 8-byte aligned S s0[3] = { {0x12a751f430142, 22}, {0x17a5c85bad144, 98}, {1, 1}}; char buffer[10*sizeof(S)]; int b = int(buffer); S *s = (S*)(b + 4-b%8); s[0] = s0[0]; s[1] = s0[1]; s[2] = s0[2]; printf("*%d : %d : %d\n", sizeof(S), ((unsigned int)&s[0]) % 8 != ((unsigned int)&s[1]) % 8, ((unsigned int)&s[1]) - ((unsigned int)&s[0])); s[0].x++; s[0].y++; s[1].x++; s[1].y++; printf("%.1f,%d,%.1f,%d\n", s[0].x, s[0].y, s[1].x, s[1].y); return 0; } ''' # TODO: A version of this with int64s as well self.do_run(src, '*12 : 1 : 12\n328157500735811.0,23,416012775903557.0,99\n') return # TODO: continue to the next part here # Test for undefined behavior in C. 
This is not legitimate code, but does exist src = r''' #include <stdio.h> int main() { int x[10]; char *p = (char*)&x[0]; p++; short *q = (short*)p; *q = 300; printf("*%d:%d*\n", *q, ((int)q)%2); int *r = (int*)p; *r = 515559; printf("*%d*\n", *r); long long *t = (long long*)p; *t = 42949672960; printf("*%Ld*\n", *t); return 0; } ''' try: self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n') except Exception, e: assert 'must be aligned' in str(e), e # expected to fail without emulation def test_align64(self): src = r''' #include <stdio.h> // inspired by poppler enum Type { A = 10, B = 20 }; struct Object { Type type; union { int intg; double real; char *name; }; }; struct Principal { double x; Object a; double y; }; int main(int argc, char **argv) { int base = argc-1; Object *o = NULL; printf("%d,%d\n", sizeof(Object), sizeof(Principal)); printf("%d,%d,%d,%d\n", (int)&o[base].type, (int)&o[base].intg, (int)&o[base].real, (int)&o[base].name); printf("%d,%d,%d,%d\n", (int)&o[base+1].type, (int)&o[base+1].intg, (int)&o[base+1].real, (int)&o[base+1].name); Principal p, q; p.x = p.y = q.x = q.y = 0; p.a.type = A; p.a.real = 123.456; *(&q.a) = p.a; printf("%.2f,%d,%.2f,%.2f : %.2f,%d,%.2f,%.2f\n", p.x, p.a.type, p.a.real, p.y, q.x, q.a.type, q.a.real, q.y); return 0; } ''' self.do_run(src, '''16,32 0,8,8,8 16,24,24,24 0.00,10,123.46,0.00 : 0.00,10,123.46,0.00 ''') def test_unsigned(self): src = ''' #include <stdio.h> const signed char cvals[2] = { -1, -2 }; // compiler can store this is a string, so -1 becomes \FF, and needs re-signing int main() { { unsigned char x = 200; printf("*%d*\\n", x); unsigned char y = -22; printf("*%d*\\n", y); } int varey = 100; unsigned int MAXEY = -1, MAXEY2 = -77; printf("*%u,%d,%u*\\n", MAXEY, varey >= MAXEY, MAXEY2); // 100 >= -1? not in unsigned! int y = cvals[0]; printf("*%d,%d,%d,%d*\\n", cvals[0], cvals[0] < 0, y, y < 0); y = cvals[1]; printf("*%d,%d,%d,%d*\\n", cvals[1], cvals[1] < 0, y, y < 0); // zext issue - see mathop in jsifier unsigned char x8 = -10; unsigned long hold = 0; hold += x8; int y32 = hold+50; printf("*%u,%u*\\n", hold, y32); // Comparisons x8 = 0; for (int i = 0; i < 254; i++) x8++; // make it an actual 254 in JS - not a -2 printf("*%d,%d*\\n", x8+1 == 0xff, x8+1 != 0xff); // 0xff may be '-1' in the bitcode return 0; } ''' self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*') src = ''' #include <stdio.h> int main() { { unsigned char x; unsigned char *y = &x; *y = -1; printf("*%d*\\n", x); } { unsigned short x; unsigned short *y = &x; *y = -1; printf("*%d*\\n", x); } /*{ // This case is not checked. 
The hint for unsignedness is just the %u in printf, and we do not analyze that unsigned int x; unsigned int *y = &x; *y = -1; printf("*%u*\\n", x); }*/ { char x; char *y = &x; *y = 255; printf("*%d*\\n", x); } { char x; char *y = &x; *y = 65535; printf("*%d*\\n", x); } { char x; char *y = &x; *y = 0xffffffff; printf("*%d*\\n", x); } return 0; } ''' self.do_run(src, '*255*\n*65535*\n*-1*\n*-1*\n*-1*') def test_bitfields(self): test_path = path_from_root('tests', 'core', 'test_bitfields') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_floatvars(self): test_path = path_from_root('tests', 'core', 'test_floatvars') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_closebitcasts(self): test_path = path_from_root('tests', 'core', 'closebitcasts') src, output = (test_path + s for s in ('.c', '.txt')) self.do_run_from_file(src, output) def test_fast_math(self): Building.COMPILER_TEST_OPTS += ['-ffast-math'] test_path = path_from_root('tests', 'core', 'test_fast_math') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, ['5', '6', '8']) def test_zerodiv(self): test_path = path_from_root('tests', 'core', 'test_zerodiv') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_zero_multiplication(self): test_path = path_from_root('tests', 'core', 'test_zero_multiplication') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_isnan(self): test_path = path_from_root('tests', 'core', 'test_isnan') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_globaldoubles(self): test_path = path_from_root('tests', 'core', 'test_globaldoubles') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_math(self): test_path = path_from_root('tests', 'core', 'test_math') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_erf(self): test_path = path_from_root('tests', 'core', 'test_erf') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_math_hyperbolic(self): src = open(path_from_root('tests', 'hyperbolic', 'src.c'), 'r').read() expected = open(path_from_root('tests', 'hyperbolic', 'output.txt'), 'r').read() self.do_run(src, expected) def test_math_lgamma(self): test_path = path_from_root('tests', 'math', 'lgamma') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) print 'main module' Settings.MAIN_MODULE = 1 self.do_run_from_file(src, output) def test_frexp(self): test_path = path_from_root('tests', 'core', 'test_frexp') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_rounding(self): Settings.PRECISE_F32 = 1 # in the move to llvm 3.7, froundf in musl became more sensitive to float/double differences test_path = path_from_root('tests', 'core', 'test_rounding') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_fcvt(self): test_path = path_from_root('tests', 'core', 'test_fcvt') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_llrint(self): test_path = path_from_root('tests', 'core', 'test_llrint') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_getgep(self): # Generated code 
includes getelementptr (getelementptr, 0, 1), i.e., GEP as the first param to GEP test_path = path_from_root('tests', 'core', 'test_getgep') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_multiply_defined_symbols(self): a1 = "int f() { return 1; }" a1_name = os.path.join(self.get_dir(), 'a1.c') open(a1_name, 'w').write(a1) a2 = "void x() {}" a2_name = os.path.join(self.get_dir(), 'a2.c') open(a2_name, 'w').write(a2) b1 = "int f() { return 2; }" b1_name = os.path.join(self.get_dir(), 'b1.c') open(b1_name, 'w').write(b1) b2 = "void y() {}" b2_name = os.path.join(self.get_dir(), 'b2.c') open(b2_name, 'w').write(b2) main = r''' #include <stdio.h> int f(); int main() { printf("result: %d\n", f()); return 0; } ''' main_name = os.path.join(self.get_dir(), 'main.c') open(main_name, 'w').write(main) Building.emcc(a1_name) Building.emcc(a2_name) Building.emcc(b1_name) Building.emcc(b2_name) Building.emcc(main_name) liba_name = os.path.join(self.get_dir(), 'liba.a') Building.emar('cr', liba_name, [a1_name + '.o', a2_name + '.o']) libb_name = os.path.join(self.get_dir(), 'libb.a') Building.emar('cr', libb_name, [b1_name + '.o', b2_name + '.o']) all_name = os.path.join(self.get_dir(), 'all.bc') Building.link([main_name + '.o', liba_name, libb_name], all_name) self.do_ll_run(all_name, 'result: 1') def test_if(self): test_path = path_from_root('tests', 'core', 'test_if') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_if_else(self): test_path = path_from_root('tests', 'core', 'test_if_else') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_loop(self): test_path = path_from_root('tests', 'core', 'test_loop') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_stack(self): Settings.INLINING_LIMIT = 50 test_path = path_from_root('tests', 'core', 'test_stack') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_stack_align(self): Settings.INLINING_LIMIT = 50 src = path_from_root('tests', 'core', 'test_stack_align.cpp') def test(): self.do_run(open(src).read(), ['''align 4: 0 align 8: 0 align 16: 0 align 32: 0 base align: 0, 0, 0, 0''']) test() if '-O' in str(self.emcc_args): print 'outlining' Settings.OUTLINING_LIMIT = 60 test() def test_stack_restore(self): if self.is_emterpreter(): return self.skip('generated code not available in emterpreter') self.emcc_args += ['-g3'] # to be able to find the generated code test_path = path_from_root('tests', 'core', 'test_stack_restore') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) generated = open('src.cpp.o.js').read() def ensure_stack_restore_count(function_name, expected_count): code = generated[generated.find(function_name):] code = code[:code.find('\n}') + 2] actual_count = code.count('STACKTOP = sp') assert actual_count == expected_count, ('Expected %d stack restorations, got %d' % (expected_count, actual_count)) + ': ' + code ensure_stack_restore_count('function _no_stack_usage', 0) ensure_stack_restore_count('function _alloca_gets_restored', 1) ensure_stack_restore_count('function _stack_usage', 1) def test_strings(self): test_path = path_from_root('tests', 'core', 'test_strings') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, ['wowie', 'too', '74']) if self.emcc_args == []: gen = open(self.in_dir('src.cpp.o.js')).read() assert 
      assert ('var __str1;' in gen) == named

  def test_strcmp_uni(self):
    test_path = path_from_root('tests', 'core', 'test_strcmp_uni')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strndup(self):
    test_path = path_from_root('tests', 'core', 'test_strndup')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_errar(self):
    test_path = path_from_root('tests', 'core', 'test_errar')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_mainenv(self):
    test_path = path_from_root('tests', 'core', 'test_mainenv')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_funcs(self):
    test_path = path_from_root('tests', 'core', 'test_funcs')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_structs(self):
    test_path = path_from_root('tests', 'core', 'test_structs')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  gen_struct_src = '''
        #include <stdio.h>
        #include <stdlib.h>
        #include "emscripten.h"

        struct S { int x, y; };
        int main() {
          S* a = {{gen_struct}};
          a->x = 51; a->y = 62;
          printf("*%d,%d*\\n", a->x, a->y);
          {{del_struct}}(a);
          return 0;
        }
  '''

  def test_mallocstruct(self):
    self.do_run(self.gen_struct_src.replace('{{gen_struct}}', '(S*)malloc(sizeof(S))').replace('{{del_struct}}', 'free'), '*51,62*')

  def test_newstruct(self):
    self.do_run(self.gen_struct_src.replace('{{gen_struct}}', 'new S').replace('{{del_struct}}', 'delete'), '*51,62*')

  def test_addr_of_stacked(self):
    test_path = path_from_root('tests', 'core', 'test_addr_of_stacked')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_globals(self):
    test_path = path_from_root('tests', 'core', 'test_globals')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_linked_list(self):
    test_path = path_from_root('tests', 'core', 'test_linked_list')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_sup(self):
    src = '''
      #include <stdio.h>

      struct S4   { int x;          }; // size: 4
      struct S4_2 { short x, y;     }; // size: 4, but for alignment purposes, 2
      struct S6   { short x, y, z;  }; // size: 6
      struct S6w  { char x[6];      }; // size: 6 also
      struct S6z  { int x; short y; }; // size: 8, since we align to a multiple of the biggest - 4

      struct C___  { S6 a, b, c; int later; };
      struct Carr  { S6 a[3]; int later; }; // essentially the same, but differently defined
      struct C__w  { S6 a; S6w b; S6 c; int later; }; // same size, different struct
      struct Cp1_  { int pre; short a; S6 b, c; int later; }; // fillers for a
      struct Cp2_  { int a; short pre; S6 b, c; int later; }; // fillers for a (get addr of the other filler)
      struct Cint  { S6 a; int  b; S6 c; int later; }; // An int (different size) for b
      struct C4__  { S6 a; S4   b; S6 c; int later; }; // Same size as int from before, but a struct
      struct C4_2  { S6 a; S4_2 b; S6 c; int later; }; // Same size as int from before, but a struct with max element size 2
      struct C__z  { S6 a; S6z  b; S6 c; int later; }; // different size, 8 instead of 6

      int main()
      {
        #define TEST(struc) \\
        { \\
          struc *s = 0; \\
          printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a), (int)&(s->b), (int)&(s->c), (int)&(s->later), sizeof(struc)); \\
        }
        #define TEST_ARR(struc) \\
        { \\
          struc *s = 0; \\
          printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a[0]), (int)&(s->a[1]),
(int)&(s->a[2]), (int)&(s->later), sizeof(struc)); \\ } printf("sizeofs:%d,%d\\n", sizeof(S6), sizeof(S6z)); TEST(C___); TEST_ARR(Carr); TEST(C__w); TEST(Cp1_); TEST(Cp2_); TEST(Cint); TEST(C4__); TEST(C4_2); TEST(C__z); return 0; } ''' self.do_run(src, 'sizeofs:6,8\n*C___: 0,6,12,20<24*\n*Carr: 0,6,12,20<24*\n*C__w: 0,6,12,20<24*\n*Cp1_: 4,6,12,20<24*\n*Cp2_: 0,6,12,20<24*\n*Cint: 0,8,12,20<24*\n*C4__: 0,8,12,20<24*\n*C4_2: 0,6,10,16<20*\n*C__z: 0,8,16,24<28*') def test_assert(self): test_path = path_from_root('tests', 'core', 'test_assert') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_libcextra(self): test_path = path_from_root('tests', 'core', 'test_libcextra') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_regex(self): test_path = path_from_root('tests', 'core', 'test_regex') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp(self): test_path = path_from_root('tests', 'core', 'test_longjmp') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp2(self): test_path = path_from_root('tests', 'core', 'test_longjmp2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp3(self): test_path = path_from_root('tests', 'core', 'test_longjmp3') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp4(self): test_path = path_from_root('tests', 'core', 'test_longjmp4') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp_funcptr(self): test_path = path_from_root('tests', 'core', 'test_longjmp_funcptr') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp_repeat(self): test_path = path_from_root('tests', 'core', 'test_longjmp_repeat') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp_stacked(self): test_path = path_from_root('tests', 'core', 'test_longjmp_stacked') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp_exc(self): test_path = path_from_root('tests', 'core', 'test_longjmp_exc') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_longjmp_throw(self): for disable_throw in [0, 1]: print disable_throw Settings.DISABLE_EXCEPTION_CATCHING = disable_throw test_path = path_from_root('tests', 'core', 'test_longjmp_throw') src, output = (test_path + s for s in ('.cpp', '.out')) self.do_run_from_file(src, output) def test_setjmp_many(self): src = r''' #include <stdio.h> #include <setjmp.h> int main(int argc) { jmp_buf buf; for (int i = 0; i < NUM; i++) printf("%d\n", setjmp(buf)); if (argc-- == 1131) longjmp(buf, 11); return 0; } ''' for num in [1, 5, 20, 1000]: print num self.do_run(src.replace('NUM', str(num)), '0\n' * num) def test_setjmp_many_2(self): src = r''' #include <setjmp.h> #include <stdio.h> jmp_buf env; void luaWork(int d){ int x; printf("d is at %d\n", d); longjmp(env, 1); } int main() { const int ITERATIONS=25; for(int i = 0; i < ITERATIONS; i++){ if(!setjmp(env)){ luaWork(i); } } return 0; } ''' self.do_run(src, r'''d is at 24''') def test_setjmp_noleak(self): src = r''' #include <setjmp.h> #include <stdio.h> #include <assert.h> jmp_buf env; void luaWork(int d){ int x; printf("d is at %d\n", d); longjmp(env, 
1); } #include <malloc.h> #include <stdlib.h> void dump() { struct mallinfo m = mallinfo(); printf("dump: %d , %d\n", m.arena, m.uordblks); } void work(int n) { printf("work %d\n", n); dump(); if(!setjmp(env)){ luaWork(n); } if (n > 0) work(n-1); } int main() { struct mallinfo m1 = mallinfo(); dump(); work(10); dump(); struct mallinfo m2 = mallinfo(); assert(m1.arena == m2.arena && m1.uordblks == m2.uordblks); printf("ok.\n"); } ''' self.do_run(src, r'''ok.''') def test_exceptions(self): Settings.EXCEPTION_DEBUG = 1 Settings.DISABLE_EXCEPTION_CATCHING = 0 if '-O2' in self.emcc_args: self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage src = ''' #include <stdio.h> void thrower() { printf("infunc..."); throw(99); printf("FAIL"); } int main() { try { printf("*throw..."); throw(1); printf("FAIL"); } catch(...) { printf("caught!"); } try { thrower(); } catch(...) { printf("done!*\\n"); } return 0; } ''' self.do_run(src, '*throw...caught!infunc...done!*') Settings.DISABLE_EXCEPTION_CATCHING = 1 self.do_run(src, 'Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0') src = ''' #include <iostream> class MyException { public: MyException(){ std::cout << "Construct..."; } MyException( const MyException & ) { std::cout << "Copy..."; } ~MyException(){ std::cout << "Destruct..."; } }; int function() { std::cout << "Throw..."; throw MyException(); } int function2() { return function(); } int main() { try { function2(); } catch (MyException & e) { std::cout << "Caught..."; } try { function2(); } catch (MyException e) { std::cout << "Caught..."; } return 0; } ''' Settings.DISABLE_EXCEPTION_CATCHING = 0 self.do_run(src, 'Throw...Construct...Caught...Destruct...Throw...Construct...Copy...Caught...Destruct...Destruct...') def test_exceptions_2(self): Settings.DISABLE_EXCEPTION_CATCHING = 0 for safe in [0,1]: print safe Settings.SAFE_HEAP = safe test_path = path_from_root('tests', 'core', 'test_exceptions_2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_exceptions_3(self): Settings.DISABLE_EXCEPTION_CATCHING = 0 src = r''' #include <iostream> #include <stdexcept> int main(int argc, char **argv) { if (argc != 2) { std::cout << "need an arg" << std::endl; return 1; } int arg = argv[1][0] - '0'; try { if (arg == 0) throw "a c string"; if (arg == 1) throw std::exception(); if (arg == 2) throw std::runtime_error("Hello"); } catch(const char * ex) { std::cout << "Caught C string: " << ex << std::endl; } catch(const std::exception &ex) { std::cout << "Caught exception: " << ex.what() << std::endl; } catch(...) 
{ std::cout << "Caught something else" << std::endl; } std::cout << "Done.\n"; } ''' print '0' self.do_run(src, 'Caught C string: a c string\nDone.', ['0']) print '1' self.do_run(src, 'Caught exception: std::exception\nDone.', ['1'], no_build=True) print '2' self.do_run(src, 'Caught exception: Hello\nDone.', ['2'], no_build=True) def test_exceptions_white_list(self): Settings.DISABLE_EXCEPTION_CATCHING = 2 Settings.EXCEPTION_CATCHING_WHITELIST = ["__Z12somefunctionv"] Settings.INLINING_LIMIT = 50 # otherwise it is inlined and not identified test_path = path_from_root('tests', 'core', 'test_exceptions_white_list') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) size = len(open('src.cpp.o.js').read()) shutil.copyfile('src.cpp.o.js', 'orig.js') # check that an empty whitelist works properly (as in, same as exceptions disabled) empty_output = path_from_root('tests', 'core', 'test_exceptions_white_list_empty.out') Settings.EXCEPTION_CATCHING_WHITELIST = [] self.do_run_from_file(src, empty_output) empty_size = len(open('src.cpp.o.js').read()) shutil.copyfile('src.cpp.o.js', 'empty.js') Settings.EXCEPTION_CATCHING_WHITELIST = ['fake'] self.do_run_from_file(src, empty_output) fake_size = len(open('src.cpp.o.js').read()) shutil.copyfile('src.cpp.o.js', 'fake.js') Settings.DISABLE_EXCEPTION_CATCHING = 1 self.do_run_from_file(src, empty_output) disabled_size = len(open('src.cpp.o.js').read()) shutil.copyfile('src.cpp.o.js', 'disabled.js') assert size - empty_size > 0.005*size, [empty_size, size] # big change when we disable entirely assert size - fake_size > 0.005*size, [fake_size, size] assert abs(empty_size - fake_size) < 0.007*size, [empty_size, fake_size] assert empty_size - disabled_size < 0.007*size, [empty_size, disabled_size] # full disable removes a little bit more assert fake_size - disabled_size < 0.007*size, [disabled_size, fake_size] def test_exceptions_white_list_2(self): Settings.DISABLE_EXCEPTION_CATCHING = 2 Settings.EXCEPTION_CATCHING_WHITELIST = ["_main"] Settings.INLINING_LIMIT = 50 # otherwise it is inlined and not identified test_path = path_from_root('tests', 'core', 'test_exceptions_white_list_2') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def test_exceptions_uncaught(self): Settings.DISABLE_EXCEPTION_CATCHING = 0 src = r''' #include <stdio.h> #include <exception> struct X { ~X() { printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no"); } }; int main() { printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no"); try { X x; throw 1; } catch(...) { printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no"); } printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no"); return 0; } ''' self.do_run(src, 'exception? no\nexception? yes\nexception? no\nexception? no\n') src = r''' #include <fstream> #include <iostream> int main() { std::ofstream os("test"); os << std::unitbuf << "foo"; // trigger a call to std::uncaught_exception from // std::basic_ostream::sentry::~sentry std::cout << "success"; } ''' self.do_run(src, 'success') def test_exceptions_typed(self): Settings.DISABLE_EXCEPTION_CATCHING = 0 self.emcc_args += ['-s', 'SAFE_HEAP=0'] # Throwing null will cause an ignorable null pointer access. 
    test_path = path_from_root('tests', 'core', 'test_exceptions_typed')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_exceptions_virtual_inheritance(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    test_path = path_from_root('tests', 'core', 'test_exceptions_virtual_inheritance')
    src, output = (test_path + s for s in ('.cpp', '.txt'))
    self.do_run_from_file(src, output)

  def test_exceptions_convert(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    test_path = path_from_root('tests', 'core', 'test_exceptions_convert')
    src, output = (test_path + s for s in ('.cpp', '.txt'))
    self.do_run_from_file(src, output)

  def test_exceptions_multi(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    test_path = path_from_root('tests', 'core', 'test_exceptions_multi')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_exceptions_std(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    Settings.ERROR_ON_UNDEFINED_SYMBOLS = 1
    self.emcc_args += ['-s', 'SAFE_HEAP=0']
    test_path = path_from_root('tests', 'core', 'test_exceptions_std')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_exceptions_alias(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    test_path = path_from_root('tests', 'core', 'test_exceptions_alias')
    src, output = (test_path + s for s in ('.c', '.out'))
    self.do_run_from_file(src, output)

  def test_exceptions_rethrow(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    test_path = path_from_root('tests', 'core', 'test_exceptions_rethrow')
    src, output = (test_path + s for s in ('.cpp', '.txt'))
    self.do_run_from_file(src, output)

  def test_exceptions_resume(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    Settings.EXCEPTION_DEBUG = 1
    test_path = path_from_root('tests', 'core', 'test_exceptions_resume')
    src, output = (test_path + s for s in ('.cpp', '.txt'))
    self.do_run_from_file(src, output)

  def test_exceptions_destroy_virtual(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    test_path = path_from_root('tests', 'core', 'test_exceptions_destroy_virtual')
    src, output = (test_path + s for s in ('.cpp', '.txt'))
    self.do_run_from_file(src, output)

  def test_exceptions_refcount(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    test_path = path_from_root('tests', 'core', 'test_exceptions_refcount')
    src, output = (test_path + s for s in ('.cpp', '.txt'))
    self.do_run_from_file(src, output)

  def test_exceptions_primary(self):
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    test_path = path_from_root('tests', 'core', 'test_exceptions_primary')
    src, output = (test_path + s for s in ('.cpp', '.txt'))
    self.do_run_from_file(src, output)

  def test_bad_typeid(self):
    Settings.ERROR_ON_UNDEFINED_SYMBOLS = 1
    Settings.DISABLE_EXCEPTION_CATCHING = 0

    self.do_run(r'''
      // exception example
      #include <iostream>       // std::cerr
      #include <typeinfo>       // operator typeid
      #include <exception>      // std::exception

      class Polymorphic {virtual void member(){}};

      int main () {
        try
        {
          Polymorphic * pb = 0;
          typeid(*pb);  // throws a bad_typeid exception
        }
        catch (std::exception& e)
        {
          std::cerr << "exception caught: " << e.what() << '\n';
        }
        return 0;
      }
      ''', 'exception caught: std::bad_typeid')

  def test_exit_stack(self):
    if Settings.ASM_JS: return self.skip('uses report_stack without exporting')

    Settings.INLINING_LIMIT = 50
    Settings.NO_EXIT_RUNTIME = 1

    src = r'''
      #include <stdio.h>
      #include <stdlib.h>

      extern "C" {
        extern void report_stack(int x);
      }

      char moar() {
        char temp[125];
        for (int i = 0; i < 125; i++) temp[i] = i*i;
        for (int i = 1; i < 125; i++)
temp[i] += temp[i-1]/2; if (temp[100] != 99) exit(1); return temp[120]; } int main(int argc, char *argv[]) { report_stack((int)alloca(4)); printf("*%d*\n", moar()); return 0; } ''' open(os.path.join(self.get_dir(), 'pre.js'), 'w').write(''' var initialStack = -1; var _report_stack = function(x) { Module.print('reported'); initialStack = x; } var Module = { postRun: function() { Module.print('Exit Status: ' + EXITSTATUS); Module.print('postRun'); assert(initialStack == STACKTOP, [initialStack, STACKTOP]); Module.print('ok.'); } }; ''') self.emcc_args += ['--pre-js', 'pre.js'] self.do_run(src, '''reported\n*0*\nExit Status: 0\npostRun\nok.\n''') def test_class(self): test_path = path_from_root('tests', 'core', 'test_class') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_inherit(self): test_path = path_from_root('tests', 'core', 'test_inherit') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_isdigit_l(self): test_path = path_from_root('tests', 'core', 'test_isdigit_l') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_iswdigit(self): test_path = path_from_root('tests', 'core', 'test_iswdigit') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_polymorph(self): test_path = path_from_root('tests', 'core', 'test_polymorph') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_complex(self): self.do_run(r''' #include <complex.h> #include <stdio.h> int main(int argc, char**argv) { float complex z1 = 1.0 + 3.0 * I; printf("value = real %.2f imag %.2f\n",creal(z1),cimag(z1)); float abs_value = cabsf(z1); printf("abs = %.2f\n",abs_value); float complex z2 = conjf(z1); printf("value = real %.2f imag %.2f\n",creal(z2),cimag(z2)); float complex z3 = cexpf(z1); printf("value = real %.2f imag %.2f\n",creal(z3),cimag(z3)); float complex z4 = conj(z1); printf("value = real %.2f imag %.2f\n",creal(z4),cimag(z4)); float complex z5 = cargf(z1); printf("value = real %.2f imag %.2f\n",creal(z5),cimag(z5)); return 0; } ''', '''value = real 1.00 imag 3.00 abs = 3.16 value = real 1.00 imag -3.00 value = real -2.69 imag 0.38 value = real 1.00 imag -3.00 value = real 1.25 imag 0.00''', force_c=True) def test_segfault(self): Settings.SAFE_HEAP = 1 for addr in ['0', 'new D2()']: print addr src = r''' #include <stdio.h> struct Classey { virtual void doIt() = 0; }; struct D1 : Classey { virtual void doIt() { printf("fleefl\n"); } }; struct D2 : Classey { virtual void doIt() { printf("marfoosh\n"); } }; int main(int argc, char **argv) { Classey *p = argc == 100 ? 
new D1() : (Classey*)%s; p->doIt(); return 0; } ''' % addr self.do_run(src, 'segmentation fault' if addr.isdigit() else 'marfoosh') def test_dynamic_cast(self): test_path = path_from_root('tests', 'core', 'test_dynamic_cast') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_dynamic_cast_b(self): test_path = path_from_root('tests', 'core', 'test_dynamic_cast_b') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_dynamic_cast_2(self): test_path = path_from_root('tests', 'core', 'test_dynamic_cast_2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_funcptr(self): test_path = path_from_root('tests', 'core', 'test_funcptr') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_mathfuncptr(self): test_path = path_from_root('tests', 'core', 'test_mathfuncptr') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) if self.is_emterpreter(): print 'emterpreter f32' Settings.PRECISE_F32 = 1 self.do_run_from_file(src, output) def test_funcptrfunc(self): test_path = path_from_root('tests', 'core', 'test_funcptrfunc') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_funcptr_namecollide(self): test_path = path_from_root('tests', 'core', 'test_funcptr_namecollide') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, force_c=True) def test_emptyclass(self): test_path = path_from_root('tests', 'core', 'test_emptyclass') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_alloca(self): test_path = path_from_root('tests', 'core', 'test_alloca') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, force_c=True) def test_rename(self): src = open(path_from_root('tests', 'stdio', 'test_rename.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_alloca_stack(self): test_path = path_from_root('tests', 'core', 'test_alloca_stack') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, force_c=True) def test_stack_byval(self): test_path = path_from_root('tests', 'core', 'test_stack_byval') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_stack_varargs(self): Settings.INLINING_LIMIT = 50 Settings.TOTAL_STACK = 1024 test_path = path_from_root('tests', 'core', 'test_stack_varargs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_stack_varargs2(self): Settings.TOTAL_STACK = 1536 src = r''' #include <stdio.h> #include <stdlib.h> void func(int i) { } int main() { for (int i = 0; i < 1024; i++) { printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i); } printf("ok!\n"); return 0; } ''' self.do_run(src, 'ok!') print 'with return' src = r''' #include <stdio.h> #include <stdlib.h> int main() { for (int i = 0; i < 1024; i++) { int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d", i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i); printf(" (%d)\n", j); } 
printf("ok!\n"); return 0; } ''' self.do_run(src, 'ok!') print 'with definitely no return' src = r''' #include <stdio.h> #include <stdlib.h> #include <stdarg.h> void vary(const char *s, ...) { va_list v; va_start(v, s); char d[20]; vsnprintf(d, 20, s, v); puts(d); // Try it with copying va_list tempva; va_copy(tempva, v); vsnprintf(d, 20, s, tempva); puts(d); va_end(v); } int main() { for (int i = 0; i < 1024; i++) { int j = printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d", i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i); printf(" (%d)\n", j); vary("*cheez: %d+%d*", 99, 24); vary("*albeit*"); } printf("ok!\n"); return 0; } ''' self.do_run(src, 'ok!') def test_stack_void(self): Settings.INLINING_LIMIT = 50 test_path = path_from_root('tests', 'core', 'test_stack_void') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_life(self): self.emcc_args += ['-std=c99'] src = open(path_from_root('tests', 'life.c'), 'r').read() self.do_run(src, '''-------------------------------- [] [] [][][] [] [] [] [][] [] [] [] [] [][] [][] [][][] [] [] [] [] [] [][] [] [] [] [][] [] [] [] [] [][][][] [][] [][] [] [][][] [] [] [] [][] [][] [][] [][][] [][] [][][] [] [] [][] [][] [] [][][] [] [][][] [] [][] [][] [][] [] [][] [][] [][] [][] [] [][] [][] [] [] [][] [] [][][] [] [] [][] [] [] [] [] [] [] [] [][][] [] [][][] [] -------------------------------- ''', ['2'], force_c=True) def test_array2(self): test_path = path_from_root('tests', 'core', 'test_array2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_array2b(self): test_path = path_from_root('tests', 'core', 'test_array2b') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_constglobalstructs(self): test_path = path_from_root('tests', 'core', 'test_constglobalstructs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_conststructs(self): test_path = path_from_root('tests', 'core', 'test_conststructs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_bigarray(self): test_path = path_from_root('tests', 'core', 'test_bigarray') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_mod_globalstruct(self): test_path = path_from_root('tests', 'core', 'test_mod_globalstruct') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_pystruct(self): src = ''' #include <stdio.h> // Based on CPython code union PyGC_Head { struct { union PyGC_Head *gc_next; union PyGC_Head *gc_prev; size_t gc_refs; } gc; long double dummy; /* force worst-case alignment */ } ; struct gc_generation { PyGC_Head head; int threshold; /* collection threshold */ int count; /* count of allocations or collections of younger generations */ }; #define NUM_GENERATIONS 3 #define GEN_HEAD(n) (&generations[n].head) /* linked lists of container objects */ static struct gc_generation generations[NUM_GENERATIONS] = { /* PyGC_Head, threshold, count */ {{{GEN_HEAD(0), GEN_HEAD(0), 0}}, 700, 0}, {{{GEN_HEAD(1), GEN_HEAD(1), 0}}, 10, 0}, {{{GEN_HEAD(2), GEN_HEAD(2), 0}}, 10, 0}, }; int main() { gc_generation *n = NULL; printf("*%d,%d,%d,%d,%d,%d,%d,%d*\\n", (int)(&n[0]), (int)(&n[0].head), (int)(&n[0].head.gc.gc_next), 
(int)(&n[0].head.gc.gc_prev), (int)(&n[0].head.gc.gc_refs), (int)(&n[0].threshold), (int)(&n[0].count), (int)(&n[1]) ); printf("*%d,%d,%d*\\n", (int)(&generations[0]) == (int)(&generations[0].head.gc.gc_next), (int)(&generations[0]) == (int)(&generations[0].head.gc.gc_prev), (int)(&generations[0]) == (int)(&generations[1]) ); int x1 = (int)(&generations[0]); int x2 = (int)(&generations[1]); printf("*%d*\\n", x1 == x2); for (int i = 0; i < NUM_GENERATIONS; i++) { PyGC_Head *list = GEN_HEAD(i); printf("%d:%d,%d\\n", i, (int)list == (int)(list->gc.gc_prev), (int)list ==(int)(list->gc.gc_next)); } printf("*%d,%d,%d*\\n", sizeof(PyGC_Head), sizeof(gc_generation), int(GEN_HEAD(2)) - int(GEN_HEAD(1))); } ''' def test(): self.do_run(src, '*0,0,0,4,8,16,20,24*\n*1,0,0*\n*0*\n0:1,1\n1:1,1\n2:1,1\n*16,24,24*') test() print 'relocatable' # this tests recursive global structs => nontrivial postSets for relocation assert Settings.RELOCATABLE == Settings.EMULATED_FUNCTION_POINTERS == 0 Settings.RELOCATABLE = Settings.EMULATED_FUNCTION_POINTERS = 1 test() Settings.RELOCATABLE = Settings.EMULATED_FUNCTION_POINTERS = 0 def test_ptrtoint(self): runner = self def check_warnings(output): runner.assertEquals(filter(lambda line: 'Warning' in line, output.split('\n')).__len__(), 4) test_path = path_from_root('tests', 'core', 'test_ptrtoint') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, output_processor=check_warnings) def test_sizeof(self): # Has invalid writes between printouts Settings.SAFE_HEAP = 0 test_path = path_from_root('tests', 'core', 'test_sizeof') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, [], lambda x, err: x.replace('\n', '*')) def test_llvm_used(self): Building.LLVM_OPTS = 3 test_path = path_from_root('tests', 'core', 'test_llvm_used') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_set_align(self): Settings.SAFE_HEAP = 1 test_path = path_from_root('tests', 'core', 'test_set_align') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def test_emscripten_api(self): #if Building.LLVM_OPTS: return self.skip('FIXME') test_path = path_from_root('tests', 'core', 'test_emscripten_api') src, output = (test_path + s for s in ('.in', '.out')) check = ''' def process(filename): src = open(filename, 'r').read() # TODO: restore this (see comment in emscripten.h) assert '// hello from the source' in src ''' Settings.EXPORTED_FUNCTIONS = ['_main', '_save_me_aimee'] self.do_run_from_file(src, output, post_build=check) # test EXPORT_ALL Settings.EXPORTED_FUNCTIONS = [] Settings.EXPORT_ALL = 1 self.do_run_from_file(src, output, post_build=check) def test_emscripten_get_now(self): self.banned_js_engines = [V8_ENGINE] # timer limitations in v8 shell if self.run_name == 'asm2': self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage self.do_run(open(path_from_root('tests', 'emscripten_get_now.cpp')).read(), 'Timer resolution is good.') def test_emscripten_get_compiler_setting(self): test_path = path_from_root('tests', 'core', 'emscripten_get_compiler_setting') src, output = (test_path + s for s in ('.c', '.out')) self.do_run(open(src).read(), 'You must build with -s RETAIN_COMPILER_SETTINGS=1') Settings.RETAIN_COMPILER_SETTINGS = 1 self.do_run(open(src).read(), open(output).read().replace('waka', EMSCRIPTEN_VERSION)) # TODO: test only worked in non-fastcomp def test_inlinejs(self): return self.skip('non-fastcomp 
is deprecated and fails in 3.5') # only supports EM_ASM test_path = path_from_root('tests', 'core', 'test_inlinejs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) if self.emcc_args == []: # opts will eliminate the comments out = open('src.cpp.o.js').read() for i in range(1, 5): assert ('comment%d' % i) in out # TODO: test only worked in non-fastcomp def test_inlinejs2(self): return self.skip('non-fastcomp is deprecated and fails in 3.5') # only supports EM_ASM test_path = path_from_root('tests', 'core', 'test_inlinejs2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_inlinejs3(self): test_path = path_from_root('tests', 'core', 'test_inlinejs3') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) print 'no debugger, check validation' src = open(src).read().replace('emscripten_debugger();', '') self.do_run(src, open(output).read()) def test_inlinejs4(self): self.do_run(r''' #include <emscripten.h> #define TO_STRING_INNER(x) #x #define TO_STRING(x) TO_STRING_INNER(x) #define assert_msg(msg, file, line) EM_ASM( throw 'Assert (' + msg + ') failed in ' + file + ':' + line + '!'; ) #define assert(expr) { \ if (!(expr)) { \ assert_msg(#expr, TO_STRING(__FILE__), TO_STRING(__LINE__)); \ } \ } int main(int argc, char **argv) { assert(argc != 17); assert(false); return 0; } ''', 'false') def test_em_asm_unicode(self): self.do_run(r''' #include <emscripten.h> int main() { EM_ASM( Module.print("hello world…") ); } ''', 'hello world…') def test_memorygrowth(self): self.banned_js_engines = [V8_ENGINE] # stderr printing limitations in v8 self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=0'] # start with 0 # With typed arrays in particular, it is dangerous to use more memory than TOTAL_MEMORY, # since we then need to enlarge the heap(s). 
src = r''' #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "emscripten.h" int main(int argc, char **argv) { char *buf1 = (char*)malloc(100); char *data1 = "hello"; memcpy(buf1, data1, strlen(data1)+1); float *buf2 = (float*)malloc(100); float pie = 4.955; memcpy(buf2, &pie, sizeof(float)); printf("*pre: %s,%.3f*\n", buf1, buf2[0]); int totalMemory = emscripten_run_script_int("TOTAL_MEMORY"); char *buf3 = (char*)malloc(totalMemory+1); buf3[argc] = (int)buf2; if (argc % 7 == 6) printf("%d\n", memcpy(buf3, buf1, argc)); char *buf4 = (char*)malloc(100); float *buf5 = (float*)malloc(100); //printf("totalMemory: %d bufs: %d,%d,%d,%d,%d\n", totalMemory, buf1, buf2, buf3, buf4, buf5); assert((int)buf4 > (int)totalMemory && (int)buf5 > (int)totalMemory); printf("*%s,%.3f*\n", buf1, buf2[0]); // the old heap data should still be there memcpy(buf4, buf1, strlen(data1)+1); memcpy(buf5, buf2, sizeof(float)); printf("*%s,%.3f*\n", buf4, buf5[0]); // and the new heap space should work too return 0; } ''' # Fail without memory growth self.do_run(src, 'Cannot enlarge memory arrays.') fail = open('src.cpp.o.js').read() # Win with it self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=1'] self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*') win = open('src.cpp.o.js').read() if '-O2' in self.emcc_args: # Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized) code_start = 'var TOTAL_MEMORY' fail = fail[fail.find(code_start):] win = win[win.find(code_start):] assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller' + str([len(fail), len(win)]) def test_ssr(self): # struct self-ref src = ''' #include <stdio.h> // see related things in openjpeg typedef struct opj_mqc_state { unsigned int qeval; int mps; struct opj_mqc_state *nmps; struct opj_mqc_state *nlps; } opj_mqc_state_t; static opj_mqc_state_t mqc_states[2] = { {0x5600, 0, &mqc_states[2], &mqc_states[3]}, {0x5602, 1, &mqc_states[3], &mqc_states[2]}, }; int main() { printf("*%d*\\n", (int)(mqc_states+1)-(int)mqc_states); for (int i = 0; i < 2; i++) printf("%d:%d,%d,%d,%d\\n", i, mqc_states[i].qeval, mqc_states[i].mps, (int)mqc_states[i].nmps-(int)mqc_states, (int)mqc_states[i].nlps-(int)mqc_states); return 0; } ''' self.do_run(src, '''*16*\n0:22016,0,32,48\n1:22018,1,48,32\n''') def test_tinyfuncstr(self): test_path = path_from_root('tests', 'core', 'test_tinyfuncstr') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_llvmswitch(self): test_path = path_from_root('tests', 'core', 'test_llvmswitch') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) # By default, when user has not specified a -std flag, Emscripten should always build .cpp files using the C++03 standard, # i.e. as if "-std=c++03" had been passed on the command line. 
On Linux with Clang 3.2 this is the case, but on Windows # with Clang 3.2 -std=c++11 has been chosen as default, because of # < jrose> clb: it's deliberate, with the idea that for people who don't care about the standard, they should be using the "best" thing we can offer on that platform def test_cxx03_do_run(self): test_path = path_from_root('tests', 'core', 'test_cxx03_do_run') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_bigswitch(self): if self.run_name != 'default': return self.skip('TODO: issue #781') src = open(path_from_root('tests', 'bigswitch.cpp')).read() self.do_run(src, '''34962: GL_ARRAY_BUFFER (0x8892) 26214: what? 35040: GL_STREAM_DRAW (0x88E0) ''', args=['34962', '26214', '35040']) def test_biggerswitch(self): num_cases = 2000 # TODO: Increase this to ~20000 range, since seeing autogenerated code that reaches that many cases. switch_case, err = Popen([PYTHON, path_from_root('tests', 'gen_large_switchcase.py'), str(num_cases)], stdout=PIPE, stderr=PIPE).communicate() self.do_run(switch_case, 'Success!') def test_indirectbr(self): Building.COMPILER_TEST_OPTS = filter(lambda x: x != '-g', Building.COMPILER_TEST_OPTS) test_path = path_from_root('tests', 'core', 'test_indirectbr') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_indirectbr_many(self): test_path = path_from_root('tests', 'core', 'test_indirectbr_many') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_pack(self): src = ''' #include <stdio.h> #include <string.h> #pragma pack(push,1) typedef struct header { unsigned char id; unsigned short colour; unsigned char desc; } header; #pragma pack(pop) typedef struct fatheader { unsigned char id; unsigned short colour; unsigned char desc; } fatheader; int main( int argc, const char *argv[] ) { header h, *ph = 0; fatheader fh, *pfh = 0; printf("*%d,%d,%d*\\n", sizeof(header), (int)((int)&h.desc - (int)&h.id), (int)(&ph[1])-(int)(&ph[0])); printf("*%d,%d,%d*\\n", sizeof(fatheader), (int)((int)&fh.desc - (int)&fh.id), (int)(&pfh[1])-(int)(&pfh[0])); return 0; } ''' self.do_run(src, '*4,3,4*\n*6,4,6*') def test_varargs(self): test_path = path_from_root('tests', 'core', 'test_varargs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_varargs_byval(self): return self.skip('clang cannot compile this code with that target yet') src = r''' #include <stdio.h> #include <stdarg.h> typedef struct type_a { union { double f; void *p; int i; short sym; } value; } type_a; enum mrb_vtype { MRB_TT_FALSE = 0, /* 0 */ MRB_TT_CLASS = 9 /* 9 */ }; typedef struct type_b { enum mrb_vtype tt:8; } type_b; void print_type_a(int argc, ...); void print_type_b(int argc, ...); int main(int argc, char *argv[]) { type_a a; type_b b; a.value.p = (void*) 0x12345678; b.tt = MRB_TT_CLASS; printf("The original address of a is: %p\n", a.value.p); printf("The original type of b is: %d\n", b.tt); print_type_a(1, a); print_type_b(1, b); return 0; } void print_type_a(int argc, ...) { va_list ap; type_a a; va_start(ap, argc); a = va_arg(ap, type_a); va_end(ap); printf("The current address of a is: %p\n", a.value.p); } void print_type_b(int argc, ...) 
{ va_list ap; type_b b; va_start(ap, argc); b = va_arg(ap, type_b); va_end(ap); printf("The current type of b is: %d\n", b.tt); } ''' self.do_run(src, '''The original address of a is: 0x12345678 The original type of b is: 9 The current address of a is: 0x12345678 The current type of b is: 9 ''') def test_functionpointer_libfunc_varargs(self): test_path = path_from_root('tests', 'core', 'test_functionpointer_libfunc_varargs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_structbyval(self): Settings.INLINING_LIMIT = 50 # part 1: make sure that normally, passing structs by value works src = r''' #include <stdio.h> struct point { int x, y; }; void dump(struct point p) { p.x++; // should not modify p.y++; // anything in the caller! printf("dump: %d,%d\n", p.x, p.y); } void dumpmod(struct point *p) { p->x++; // should not modify p->y++; // anything in the caller! printf("dump: %d,%d\n", p->x, p->y); } int main( int argc, const char *argv[] ) { point p = { 54, 2 }; printf("pre: %d,%d\n", p.x, p.y); dump(p); void (*dp)(point p) = dump; // And, as a function pointer dp(p); printf("post: %d,%d\n", p.x, p.y); dumpmod(&p); dumpmod(&p); printf("last: %d,%d\n", p.x, p.y); return 0; } ''' self.do_run(src, 'pre: 54,2\ndump: 55,3\ndump: 55,3\npost: 54,2\ndump: 55,3\ndump: 56,4\nlast: 56,4') # Check for lack of warning in the generated code (they should appear in part 2) generated = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read() assert 'Casting a function pointer type to another with a different number of arguments.' not in generated, 'Unexpected warning' # part 2: make sure we warn about mixing c and c++ calling conventions here if self.emcc_args != []: return # Optimized code is missing the warning comments header = r''' struct point { int x, y; }; ''' open(os.path.join(self.get_dir(), 'header.h'), 'w').write(header) supp = r''' #include <stdio.h> #include "header.h" void dump(struct point p) { p.x++; // should not modify p.y++; // anything in the caller! printf("dump: %d,%d\n", p.x, p.y); } ''' supp_name = os.path.join(self.get_dir(), 'supp.c') open(supp_name, 'w').write(supp) main = r''' #include <stdio.h> #include "header.h" #ifdef __cplusplus extern "C" { #endif void dump(struct point p); #ifdef __cplusplus } #endif int main( int argc, const char *argv[] ) { struct point p = { 54, 2 }; printf("pre: %d,%d\n", p.x, p.y); dump(p); void (*dp)(struct point p) = dump; // And, as a function pointer dp(p); printf("post: %d,%d\n", p.x, p.y); return 0; } ''' main_name = os.path.join(self.get_dir(), 'main.cpp') open(main_name, 'w').write(main) Building.emcc(supp_name) Building.emcc(main_name) all_name = os.path.join(self.get_dir(), 'all.bc') Building.link([supp_name + '.o', main_name + '.o'], all_name) # This will fail! See explanation near the warning we check for, in the compiler source code output = Popen([PYTHON, EMCC, all_name], stderr=PIPE).communicate() # Check for warning in the generated code generated = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read() print >> sys.stderr, 'skipping C/C++ conventions warning check, since not i386-pc-linux-gnu' def test_stdlibs(self): # safe heap prints a warning that messes up our output. 
    Settings.SAFE_HEAP = 0

    src = '''
      #include <stdio.h>
      #include <stdlib.h>
      #include <ctype.h>
      #include <sys/time.h>

      void clean()
      {
        printf("*cleaned*\\n");
      }

      int comparer(const void *a, const void *b) {
        int aa = *((int*)a);
        int bb = *((int*)b);
        return aa - bb;
      }

      int main() {
        // timeofday
        timeval t;
        gettimeofday(&t, NULL);
        printf("*%d,%d\\n", int(t.tv_sec), int(t.tv_usec)); // should not crash

        // atexit
        atexit(clean);

        // qsort
        int values[6] = { 3, 2, 5, 1, 5, 6 };
        qsort(values, 5, sizeof(int), comparer);
        printf("*%d,%d,%d,%d,%d,%d*\\n", values[0], values[1], values[2], values[3], values[4], values[5]);

        printf("*stdin==0:%d*\\n", stdin == 0); // check that external values are at least not NULL
        printf("*%%*\\n");
        printf("*%.1ld*\\n", 5);

        printf("*%.1f*\\n", strtod("66", NULL)); // checks dependency system, as our strtod needs _isspace etc.

        printf("*%ld*\\n", strtol("10", NULL, 0));
        printf("*%ld*\\n", strtol("0", NULL, 0));
        printf("*%ld*\\n", strtol("-10", NULL, 0));
        printf("*%ld*\\n", strtol("12", NULL, 16));

        printf("*%lu*\\n", strtoul("10", NULL, 0));
        printf("*%lu*\\n", strtoul("0", NULL, 0));
        printf("*%lu*\\n", strtoul("-10", NULL, 0));

        printf("*malloc(0)!=0:%d*\\n", malloc(0) != 0); // We should not fail horribly

        printf("tolower_l: %c\\n", tolower_l('A', 0));

        return 0;
      }
    '''
    self.do_run(src, '*1,2,3,5,5,6*\n*stdin==0:0*\n*%*\n*5*\n*66.0*\n*10*\n*0*\n*-10*\n*18*\n*10*\n*0*\n*4294967286*\n*malloc(0)!=0:1*\ntolower_l: a\n*cleaned*')

    src = r'''
      #include <stdio.h>
      #include <stdbool.h>

      int main() {
        bool x = true;
        bool y = false;
        printf("*%d*\n", x != y);
        return 0;
      }
    '''
    self.do_run(src, '*1*', force_c=True)

  def test_strtoll_hex(self):
    # tests strtoll for hex strings (0x...)
    test_path = path_from_root('tests', 'core', 'test_strtoll_hex')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strtoll_dec(self):
    # tests strtoll for decimal strings
    test_path = path_from_root('tests', 'core', 'test_strtoll_dec')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strtoll_bin(self):
    # tests strtoll for binary strings (0b...)
    test_path = path_from_root('tests', 'core', 'test_strtoll_bin')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strtoll_oct(self):
    # tests strtoll for octal strings (0...)
    test_path = path_from_root('tests', 'core', 'test_strtoll_oct')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strtol_hex(self):
    # tests strtol for hex strings (0x...)
    test_path = path_from_root('tests', 'core', 'test_strtol_hex')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strtol_dec(self):
    # tests strtol for decimal strings
    test_path = path_from_root('tests', 'core', 'test_strtol_dec')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strtol_bin(self):
    # tests strtol for binary strings (0b...)
    test_path = path_from_root('tests', 'core', 'test_strtol_bin')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strtol_oct(self):
    # tests strtol for octal strings (0...)
    test_path = path_from_root('tests', 'core', 'test_strtol_oct')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_atexit(self):
    # Confirms they are called in reverse order
    test_path = path_from_root('tests', 'core', 'test_atexit')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_pthread_specific(self):
    src = open(path_from_root('tests', 'pthread', 'specific.c'), 'r').read()
    expected = open(path_from_root('tests', 'pthread', 'specific.c.txt'), 'r').read()
    self.do_run(src, expected, force_c=True)

  def test_tcgetattr(self):
    src = open(path_from_root('tests', 'termios', 'test_tcgetattr.c'), 'r').read()
    self.do_run(src, 'success', force_c=True)

  def test_time(self):
    src = open(path_from_root('tests', 'time', 'src.c'), 'r').read()
    expected = open(path_from_root('tests', 'time', 'output.txt'), 'r').read()
    self.do_run(src, expected)

  def test_timeb(self):
    test_path = path_from_root('tests', 'core', 'test_timeb')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_time_c(self):
    test_path = path_from_root('tests', 'core', 'test_time_c')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_gmtime(self):
    test_path = path_from_root('tests', 'core', 'test_gmtime')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strptime_tm(self):
    test_path = path_from_root('tests', 'core', 'test_strptime_tm')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strptime_days(self):
    test_path = path_from_root('tests', 'core', 'test_strptime_days')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strptime_reentrant(self):
    test_path = path_from_root('tests', 'core', 'test_strptime_reentrant')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_strftime(self):
    test_path = path_from_root('tests', 'core', 'test_strftime')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_intentional_fault(self):
    # Some programs intentionally segfault themselves, we should compile that into a throw
    src = r'''
      int main () {
        *(volatile char *)0 = 0;
        return *(volatile char *)0;
      }
      '''
    self.do_run(src, 'abort()' if self.run_name != 'asm2g' else 'abort("segmentation fault')

  def test_trickystring(self):
    test_path = path_from_root('tests', 'core', 'test_trickystring')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_statics(self):
    test_path = path_from_root('tests', 'core', 'test_statics')
    src, output = (test_path + s for s in ('.in', '.out'))
    self.do_run_from_file(src, output)

  def test_copyop(self):
    # clang generated code is vulnerable to this, as it uses
    # memcpy for assignments, with hardcoded numbers of bytes
    # (llvm-gcc copies items one by one). See QUANTUM_SIZE in
    # settings.js.
test_path = path_from_root('tests', 'core', 'test_copyop') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_memcpy_memcmp(self): test_path = path_from_root('tests', 'core', 'test_memcpy_memcmp') src, output = (test_path + s for s in ('.in', '.out')) def check(result, err): result = result.replace('\n \n', '\n') # remove extra node output return hashlib.sha1(result).hexdigest() self.do_run_from_file(src, output, output_nicerizer = check) def test_memcpy2(self): test_path = path_from_root('tests', 'core', 'test_memcpy2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_memcpy3(self): test_path = path_from_root('tests', 'core', 'test_memcpy3') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def test_memset(self): test_path = path_from_root('tests', 'core', 'test_memset') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def test_getopt(self): test_path = path_from_root('tests', 'core', 'test_getopt') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, args=['-t', '12', '-n', 'foobar']) def test_getopt_long(self): test_path = path_from_root('tests', 'core', 'test_getopt_long') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, args=['--file', 'foobar', '-b']) def test_memmove(self): test_path = path_from_root('tests', 'core', 'test_memmove') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_memmove2(self): test_path = path_from_root('tests', 'core', 'test_memmove2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_memmove3(self): test_path = path_from_root('tests', 'core', 'test_memmove3') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_flexarray_struct(self): test_path = path_from_root('tests', 'core', 'test_flexarray_struct') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_bsearch(self): test_path = path_from_root('tests', 'core', 'test_bsearch') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_stack_overflow(self): Settings.ASSERTIONS = 1 self.do_run(open(path_from_root('tests', 'core', 'stack_overflow.cpp')).read(), 'abort()') def test_nestedstructs(self): src = ''' #include <stdio.h> #include "emscripten.h" struct base { int x; float y; union { int a; float b; }; char c; }; struct hashtableentry { int key; base data; }; struct hashset { typedef hashtableentry entry; struct chain { entry elem; chain *next; }; // struct chainchunk { chain chains[100]; chainchunk *next; }; }; struct hashtable : hashset { hashtable() { base *b = NULL; entry *e = NULL; chain *c = NULL; printf("*%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", sizeof(base), int(&(b->x)), int(&(b->y)), int(&(b->a)), int(&(b->b)), int(&(b->c)), sizeof(hashtableentry), int(&(e->key)), int(&(e->data)), int(&(e->data.x)), int(&(e->data.y)), int(&(e->data.a)), int(&(e->data.b)), int(&(e->data.c)), sizeof(hashset::chain), int(&(c->elem)), int(&(c->next)), int(&(c->elem.key)), int(&(c->elem.data)), int(&(c->elem.data.x)), int(&(c->elem.data.y)), int(&(c->elem.data.a)), int(&(c->elem.data.b)), int(&(c->elem.data.c)) ); } }; struct B { char buffer[62]; int last; char laster; char laster2; }; struct Bits { 
unsigned short A : 1; unsigned short B : 1; unsigned short C : 1; unsigned short D : 1; unsigned short x1 : 1; unsigned short x2 : 1; unsigned short x3 : 1; unsigned short x4 : 1; }; int main() { hashtable t; // Part 2 - the char[] should be compressed, BUT have a padding space at the end so the next // one is aligned properly. Also handle char; char; etc. properly. B *b = NULL; printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", int(b), int(&(b->buffer)), int(&(b->buffer[0])), int(&(b->buffer[1])), int(&(b->buffer[2])), int(&(b->last)), int(&(b->laster)), int(&(b->laster2)), sizeof(B)); // Part 3 - bitfields, and small structures Bits *b2 = NULL; printf("*%d*\\n", sizeof(Bits)); return 0; } ''' # Bloated memory; same layout as C/C++ self.do_run(src, '*16,0,4,8,8,12|20,0,4,4,8,12,12,16|24,0,20,0,4,4,8,12,12,16*\n*0,0,0,1,2,64,68,69,72*\n*2*') def test_runtimelink(self): return self.skip('BUILD_AS_SHARED_LIB=2 is deprecated') if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize printf into puts in the parent, and the child will still look for puts') if Settings.ASM_JS: return self.skip('asm does not support runtime linking') main, supp = self.setup_runtimelink_test() self.banned_js_engines = [NODE_JS] # node's global scope behaves differently than everything else, needs investigation FIXME Settings.LINKABLE = 1 Settings.BUILD_AS_SHARED_LIB = 2 self.build(supp, self.get_dir(), self.in_dir('supp.cpp')) shutil.move(self.in_dir('supp.cpp.o.js'), self.in_dir('liblib.so')) Settings.BUILD_AS_SHARED_LIB = 0 Settings.RUNTIME_LINKED_LIBS = ['liblib.so']; self.do_run(main, 'supp: 54,2\nmain: 56\nsupp see: 543\nmain see: 76\nok.') def can_dlfcn(self): return True def prep_dlfcn_lib(self): Settings.MAIN_MODULE = 0 Settings.SIDE_MODULE = 1 def prep_dlfcn_main(self): Settings.MAIN_MODULE = 1 Settings.SIDE_MODULE = 0 dlfcn_post_build = ''' def process(filename): src = open(filename, 'r').read().replace( '// {{PRE_RUN_ADDITIONS}}', "FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);" ) open(filename, 'w').write(src) ''' def test_dlfcn_basic(self): if not self.can_dlfcn(): return self.prep_dlfcn_lib() lib_src = ''' #include <cstdio> class Foo { public: Foo() { printf("Constructing lib object.\\n"); } }; Foo global; ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() src = ''' #include <cstdio> #include <dlfcn.h> class Bar { public: Bar() { printf("Constructing main object.\\n"); } }; Bar global; int main() { dlopen("liblib.so", RTLD_NOW); return 0; } ''' self.do_run(src, 'Constructing main object.\nConstructing lib object.\n', post_build=self.dlfcn_post_build) def test_dlfcn_i64(self): if not self.can_dlfcn(): return self.prep_dlfcn_lib() Settings.EXPORTED_FUNCTIONS = ['_foo'] lib_src = ''' int foo(int x) { return (long long)x / (long long)1234; } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() Settings.EXPORTED_FUNCTIONS = ['_main'] src = r''' #include <stdio.h> #include <stdlib.h> #include <dlfcn.h> typedef int (*intfunc)(int); void *p; int main() { p = malloc(1024); void *lib_handle = dlopen("liblib.so", 0); printf("load %p\n", lib_handle); intfunc x = (intfunc)dlsym(lib_handle, "foo"); printf("foo func %p\n", x); if (p == 0) return 1; printf("|%d|\n", x(81234567)); return 0; } ''' 
self.do_run(src, '|65830|', post_build=self.dlfcn_post_build) def test_dlfcn_em_asm(self): if not self.can_dlfcn(): return self.prep_dlfcn_lib() lib_src = ''' #include <emscripten.h> class Foo { public: Foo() { EM_ASM( Module.print("Constructing lib object.") ); } }; Foo global; ''' filename = 'liblib.cpp' self.build(lib_src, self.get_dir(), filename) shutil.move(filename + '.o.js', 'liblib.so') self.prep_dlfcn_main() src = ''' #include <emscripten.h> #include <dlfcn.h> class Bar { public: Bar() { EM_ASM( Module.print("Constructing main object.") ); } }; Bar global; int main() { dlopen("liblib.so", RTLD_NOW); EM_ASM( Module.print("All done.") ); return 0; } ''' self.do_run(src, 'Constructing main object.\nConstructing lib object.\nAll done.\n', post_build=self.dlfcn_post_build) def test_dlfcn_qsort(self): if not self.can_dlfcn(): return self.prep_dlfcn_lib() Settings.EXPORTED_FUNCTIONS = ['_get_cmp'] lib_src = ''' int lib_cmp(const void* left, const void* right) { const int* a = (const int*) left; const int* b = (const int*) right; if(*a > *b) return 1; else if(*a == *b) return 0; else return -1; } typedef int (*CMP_TYPE)(const void*, const void*); extern "C" CMP_TYPE get_cmp() { return lib_cmp; } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc'] src = ''' #include <stdio.h> #include <stdlib.h> #include <dlfcn.h> typedef int (*CMP_TYPE)(const void*, const void*); int main_cmp(const void* left, const void* right) { const int* a = (const int*) left; const int* b = (const int*) right; if(*a < *b) return 1; else if(*a == *b) return 0; else return -1; } int main() { void* lib_handle; CMP_TYPE (*getter_ptr)(); CMP_TYPE lib_cmp_ptr; int arr[5] = {4, 2, 5, 1, 3}; qsort((void*)arr, 5, sizeof(int), main_cmp); printf("Sort with main comparison: "); for (int i = 0; i < 5; i++) { printf("%d ", arr[i]); } printf("\\n"); lib_handle = dlopen("liblib.so", RTLD_NOW); if (lib_handle == NULL) { printf("Could not load lib.\\n"); return 1; } getter_ptr = (CMP_TYPE (*)()) dlsym(lib_handle, "get_cmp"); if (getter_ptr == NULL) { printf("Could not find func.\\n"); return 1; } lib_cmp_ptr = getter_ptr(); qsort((void*)arr, 5, sizeof(int), lib_cmp_ptr); printf("Sort with lib comparison: "); for (int i = 0; i < 5; i++) { printf("%d ", arr[i]); } printf("\\n"); return 0; } ''' self.do_run(src, 'Sort with main comparison: 5 4 3 2 1 *Sort with lib comparison: 1 2 3 4 5 *', output_nicerizer=lambda x, err: x.replace('\n', '*'), post_build=self.dlfcn_post_build) if Settings.ASM_JS and SPIDERMONKEY_ENGINE and os.path.exists(SPIDERMONKEY_ENGINE[0]): out = run_js('liblib.so', engine=SPIDERMONKEY_ENGINE, full_output=True, stderr=STDOUT) if 'asm' in out: self.validate_asmjs(out) def test_dlfcn_data_and_fptr(self): if not self.can_dlfcn(): return if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize out parent_func') self.prep_dlfcn_lib() lib_src = ''' #include <stdio.h> int global = 42; extern void parent_func(); // a function that is defined in the parent void lib_fptr() { printf("Second calling lib_fptr from main.\\n"); parent_func(); // call it also through a pointer, to check indexizing void (*p_f)(); p_f = parent_func; p_f(); } extern "C" void (*func(int x, void(*fptr)()))() { printf("In func: %d\\n", x); fptr(); return lib_fptr; } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') 
Settings.EXPORTED_FUNCTIONS = ['_func'] Settings.EXPORTED_GLOBALS = ['_global'] self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() Settings.LINKABLE = 1 src = ''' #include <stdio.h> #include <dlfcn.h> #include <emscripten.h> typedef void (*FUNCTYPE(int, void(*)()))(); FUNCTYPE func; void EMSCRIPTEN_KEEPALIVE parent_func() { printf("parent_func called from child\\n"); } void main_fptr() { printf("First calling main_fptr from lib.\\n"); } int main() { void* lib_handle; FUNCTYPE* func_fptr; // Test basic lib loading. lib_handle = dlopen("liblib.so", RTLD_NOW); if (lib_handle == NULL) { printf("Could not load lib.\\n"); return 1; } // Test looked up function. func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func"); // Load twice to test cache. func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func"); if (func_fptr == NULL) { printf("Could not find func.\\n"); return 1; } // Test passing function pointers across module bounds. void (*fptr)() = func_fptr(13, main_fptr); fptr(); // Test global data. int* global = (int*) dlsym(lib_handle, "global"); if (global == NULL) { printf("Could not find global.\\n"); return 1; } printf("Var: %d\\n", *global); return 0; } ''' Settings.EXPORTED_FUNCTIONS = ['_main'] Settings.EXPORTED_GLOBALS = [] self.do_run(src, 'In func: 13*First calling main_fptr from lib.*Second calling lib_fptr from main.*parent_func called from child*parent_func called from child*Var: 42*', output_nicerizer=lambda x, err: x.replace('\n', '*'), post_build=self.dlfcn_post_build) def test_dlfcn_varargs(self): # this test is not actually valid - it fails natively. the child should fail to be loaded, not load and successfully see the parent print_ints func if not self.can_dlfcn(): return Settings.LINKABLE = 1 self.prep_dlfcn_lib() lib_src = r''' void print_ints(int n, ...); extern "C" void func() { print_ints(2, 13, 42); } ''' dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') Settings.EXPORTED_FUNCTIONS = ['_func'] self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() src = r''' #include <stdarg.h> #include <stdio.h> #include <dlfcn.h> #include <assert.h> void print_ints(int n, ...) 
{ va_list args; va_start(args, n); for (int i = 0; i < n; i++) { printf("%d\n", va_arg(args, int)); } va_end(args); } int main() { void* lib_handle; void (*fptr)(); print_ints(2, 100, 200); lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle); fptr = (void (*)())dlsym(lib_handle, "func"); fptr(); return 0; } ''' Settings.EXPORTED_FUNCTIONS = ['_main'] self.do_run(src, '100\n200\n13\n42\n', post_build=self.dlfcn_post_build) def test_dlfcn_self(self): if not self.can_dlfcn(): return self.prep_dlfcn_main() def post(filename): with open(filename) as f: for line in f: if 'var NAMED_GLOBALS' in line: table = line break else: raise Exception('Could not find symbol table!') table = table[table.find('{'):table.find('}')+1] # ensure there aren't too many globals; we don't want unnamed_addr assert table.count(',') <= 8 test_path = path_from_root('tests', 'core', 'test_dlfcn_self') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, post_build=(None, post)) def test_dlfcn_unique_sig(self): if not self.can_dlfcn(): return self.prep_dlfcn_lib() lib_src = ''' #include <stdio.h> int myfunc(int a, int b, int c, int d, int e, int f, int g, int h, int i, int j, int k, int l, int m) { return 13; } ''' Settings.EXPORTED_FUNCTIONS = ['_myfunc'] dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() src = ''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> typedef int (*FUNCTYPE)(int, int, int, int, int, int, int, int, int, int, int, int, int); int main() { void *lib_handle; FUNCTYPE func_ptr; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc"); assert(func_ptr != NULL); assert(func_ptr(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == 13); puts("success"); return 0; } ''' Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc'] self.do_run(src, 'success', force_c=True, post_build=self.dlfcn_post_build) def test_dlfcn_stacks(self): if not self.can_dlfcn(): return self.prep_dlfcn_lib() lib_src = ''' #include <assert.h> #include <stdio.h> #include <string.h> int myfunc(const char *input) { char bigstack[1024] = { 0 }; // make sure we didn't just trample the stack! assert(!strcmp(input, "foobar")); snprintf(bigstack, sizeof(bigstack), input); return strlen(bigstack); } ''' Settings.EXPORTED_FUNCTIONS = ['_myfunc'] dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() src = ''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> #include <string.h> typedef int (*FUNCTYPE)(const char *); int main() { void *lib_handle; FUNCTYPE func_ptr; char str[128]; snprintf(str, sizeof(str), "foobar"); // HACK: Use strcmp in the main executable so that it doesn't get optimized out and the dynamic library // is able to use it. 
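// (Editor's note: for the same reason, '_strcmp' is also listed in
// Settings.EXPORTED_FUNCTIONS for this test, alongside '_main' and '_malloc'.)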
assert(!strcmp(str, "foobar")); lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); func_ptr = (FUNCTYPE)dlsym(lib_handle, "myfunc"); assert(func_ptr != NULL); assert(func_ptr(str) == 6); puts("success"); return 0; } ''' Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc', '_strcmp'] self.do_run(src, 'success', force_c=True, post_build=self.dlfcn_post_build) def test_dlfcn_funcs(self): if not self.can_dlfcn(): return self.prep_dlfcn_lib() lib_src = r''' #include <assert.h> #include <stdio.h> #include <string.h> typedef void (*voidfunc)(); typedef void (*intfunc)(int); void callvoid(voidfunc f) { f(); } void callint(voidfunc f, int x) { f(x); } void void_0() { printf("void 0\n"); } void void_1() { printf("void 1\n"); } voidfunc getvoid(int i) { switch(i) { case 0: return void_0; case 1: return void_1; default: return NULL; } } void int_0(int x) { printf("int 0 %d\n", x); } void int_1(int x) { printf("int 1 %d\n", x); } intfunc getint(int i) { switch(i) { case 0: return int_0; case 1: return int_1; default: return NULL; } } ''' Settings.EXPORTED_FUNCTIONS = ['_callvoid', '_callint', '_getvoid', '_getint'] dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() src = r''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> typedef void (*voidfunc)(); typedef void (*intfunc)(int); typedef void (*voidcaller)(voidfunc); typedef void (*intcaller)(intfunc, int); typedef voidfunc (*voidgetter)(int); typedef intfunc (*intgetter)(int); void void_main() { printf("main.\n"); } void int_main(int x) { printf("main %d\n", x); } int main() { printf("go\n"); void *lib_handle; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); voidcaller callvoid = (voidcaller)dlsym(lib_handle, "callvoid"); assert(callvoid != NULL); callvoid(void_main); intcaller callint = (intcaller)dlsym(lib_handle, "callint"); assert(callint != NULL); callint(int_main, 201); voidgetter getvoid = (voidgetter)dlsym(lib_handle, "getvoid"); assert(getvoid != NULL); callvoid(getvoid(0)); callvoid(getvoid(1)); intgetter getint = (intgetter)dlsym(lib_handle, "getint"); assert(getint != NULL); callint(getint(0), 54); callint(getint(1), 9000); assert(getint(1000) == NULL); puts("ok"); return 0; } ''' Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc'] self.do_run(src, '''go main. 
main 201 void 0 void 1 int 0 54 int 1 9000 ok ''', force_c=True, post_build=self.dlfcn_post_build) def test_dlfcn_mallocs(self): if not self.can_dlfcn(): return Settings.TOTAL_MEMORY = 64*1024*1024 # will be exhausted without functional malloc/free self.prep_dlfcn_lib() lib_src = r''' #include <assert.h> #include <stdio.h> #include <string.h> #include <stdlib.h> void *mallocproxy(int n) { return malloc(n); } void freeproxy(void *p) { free(p); } ''' Settings.EXPORTED_FUNCTIONS = ['_mallocproxy', '_freeproxy'] dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() src = open(path_from_root('tests', 'dlmalloc_proxy.c')).read() Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc', '_free'] self.do_run(src, '''*294,153*''', force_c=True, post_build=self.dlfcn_post_build) def test_dlfcn_longjmp(self): if not self.can_dlfcn(): return self.prep_dlfcn_lib() lib_src = r''' #include <setjmp.h> void jumpy(jmp_buf buf) { static int i = 0; i++; if (i == 10) longjmp(buf, i); printf("pre %d\n", i); } ''' Settings.EXPORTED_FUNCTIONS = ['_jumpy'] dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.c') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() src = r''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> #include <setjmp.h> typedef void (*jumpfunc)(jmp_buf); int main() { printf("go!\n"); void *lib_handle; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); jumpfunc jumpy = (jumpfunc)dlsym(lib_handle, "jumpy"); assert(jumpy); jmp_buf buf; int jmpval = setjmp(buf); if (jmpval == 0) { while (1) jumpy(buf); } else { printf("out!\n"); } return 0; } ''' Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc', '_free'] self.do_run(src, '''go! pre 1 pre 2 pre 3 pre 4 pre 5 pre 6 pre 7 pre 8 pre 9 out! ''', post_build=self.dlfcn_post_build, force_c=True) def zzztest_dlfcn_exceptions(self): # TODO: make this work. need to forward tempRet0 across modules if not self.can_dlfcn(): return Settings.DISABLE_EXCEPTION_CATCHING = 0 self.prep_dlfcn_lib() lib_src = r''' extern "C" { int ok() { return 65; } int fail() { throw 123; } } ''' Settings.EXPORTED_FUNCTIONS = ['_ok', '_fail'] dirname = self.get_dir() filename = os.path.join(dirname, 'liblib.cpp') self.build(lib_src, dirname, filename) shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so')) self.prep_dlfcn_main() src = r''' #include <assert.h> #include <stdio.h> #include <dlfcn.h> typedef int (*intfunc)(); int main() { printf("go!\n"); void *lib_handle; lib_handle = dlopen("liblib.so", RTLD_NOW); assert(lib_handle != NULL); intfunc okk = (intfunc)dlsym(lib_handle, "ok"); intfunc faill = (intfunc)dlsym(lib_handle, "fail"); assert(okk && faill); try { printf("ok: %d\n", okk()); } catch(...) { printf("wha\n"); } try { printf("fail: %d\n", faill()); } catch(int x) { printf("int %d\n", x); } try { printf("fail: %d\n", faill()); } catch(double x) { printf("caught %f\n", x); } return 0; } ''' Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc', '_free'] self.do_run(src, '''go! 
ok: 65 int 123 ok ''', post_build=self.dlfcn_post_build) def dylink_test(self, main, side, expected, header=None, main_emcc_args=[], force_c=False, need_reverse=True): if header: open('header.h', 'w').write(header) emcc_args = self.emcc_args[:] try: # general settings Settings.DISABLE_EXCEPTION_CATCHING = 1 self.emcc_args += ['--memory-init-file', '0'] # side settings Settings.MAIN_MODULE = 0 Settings.SIDE_MODULE = 1 if type(side) == str: base = 'liblib.cpp' if not force_c else 'liblib.c' try_delete(base + '.o.js') self.build(side, self.get_dir(), base) if force_c: shutil.move(base + '.o.js', 'liblib.cpp.o.js') else: # side is just a library try_delete('liblib.cpp.o.js') Popen([PYTHON, EMCC] + side + self.emcc_args + Settings.serialize() + ['-o', os.path.join(self.get_dir(), 'liblib.cpp.o.js')]).communicate() if SPIDERMONKEY_ENGINE and os.path.exists(SPIDERMONKEY_ENGINE[0]): out = run_js('liblib.cpp.o.js', engine=SPIDERMONKEY_ENGINE, full_output=True, stderr=STDOUT) if 'asm' in out: self.validate_asmjs(out) shutil.move('liblib.cpp.o.js', 'liblib.so') # main settings Settings.MAIN_MODULE = 1 Settings.SIDE_MODULE = 0 open('pre.js', 'w').write(''' var Module = { dynamicLibraries: ['liblib.so'], }; ''') self.emcc_args += ['--pre-js', 'pre.js'] + main_emcc_args if type(main) == str: self.do_run(main, expected, force_c=force_c) else: # main is just a library try_delete('src.cpp.o.js') Popen([PYTHON, EMCC] + main + self.emcc_args + Settings.serialize() + ['-o', os.path.join(self.get_dir(), 'src.cpp.o.js')]).communicate() self.do_run(None, expected, no_build=True) finally: self.emcc_args = emcc_args[:] if need_reverse: # test the reverse as well print 'flip' self.dylink_test(side, main, expected, header, main_emcc_args, force_c, need_reverse=False) def test_dylink_basics(self): self.dylink_test(''' #include <stdio.h> extern int sidey(); int main() { printf("other says %d.", sidey()); return 0; } ''', ''' int sidey() { return 11; } ''', 'other says 11.') def test_dylink_floats(self): self.dylink_test(''' #include <stdio.h> extern float sidey(); int main() { printf("other says %.2f.", sidey()+1); return 0; } ''', ''' float sidey() { return 11.5; } ''', 'other says 12.50') def test_dylink_printfs(self): self.dylink_test(r''' #include <stdio.h> extern void sidey(); int main() { printf("hello from main\n"); sidey(); return 0; } ''', r''' #include <stdio.h> void sidey() { printf("hello from side\n"); } ''', 'hello from main\nhello from side\n') def test_dylink_funcpointer(self): self.dylink_test(r''' #include <stdio.h> #include "header.h" voidfunc sidey(voidfunc f); void a() { printf("hello from funcptr\n"); } int main() { sidey(a)(); return 0; } ''', ''' #include "header.h" voidfunc sidey(voidfunc f) { return f; } ''', 'hello from funcptr\n', header='typedef void (*voidfunc)();') def test_dylink_funcpointers(self): self.dylink_test(r''' #include <stdio.h> #include "header.h" int sidey(voidfunc f); void areturn0() { printf("hello 0\n"); } void areturn1() { printf("hello 1\n"); } void areturn2() { printf("hello 2\n"); } int main(int argc, char **argv) { voidfunc table[3] = { areturn0, areturn1, areturn2 }; table[sidey(NULL)](); return 0; } ''', ''' #include "header.h" int sidey(voidfunc f) { if (f) f(); return 1; } ''', 'hello 1\n', header='typedef void (*voidfunc)();') def test_dylink_global_init(self): self.dylink_test(r''' #include <stdio.h> struct Class { Class() { printf("a new Class\n"); } }; static Class c; int main() { return 0; } ''', r''' void nothing() {} ''', 'a new Class\n') def 
test_dylink_global_inits(self): def test(): self.dylink_test(header=r''' #include <stdio.h> struct Class { Class(const char *name) { printf("new %s\n", name); } }; ''', main=r''' #include "header.h" static Class c("main"); int main() { return 0; } ''', side=r''' #include "header.h" static Class c("side"); ''', expected=['new main\nnew side\n', 'new side\nnew main\n']) test() if Settings.ASSERTIONS == 1: print 'check warnings' Settings.ASSERTIONS = 2 test() full = run_js('src.cpp.o.js', engine=JS_ENGINES[0], full_output=True, stderr=STDOUT) self.assertNotContained("trying to dynamically load symbol '__ZN5ClassC2EPKc' (from 'liblib.so') that already exists", full) def test_dylink_i64(self): self.dylink_test(''' #include <stdio.h> #include <stdint.h> extern int64_t sidey(); int main() { printf("other says %lld.", sidey()); return 0; } ''', ''' #include <stdint.h> int64_t sidey() { volatile int64_t x = 11; x = x * x * x * x; x += x % 17; x += (x * (1 << 30)); return x; } ''', 'other says 15724949027125.') def test_dylink_class(self): self.dylink_test(header=r''' #include <stdio.h> struct Class { Class(const char *name); }; ''', main=r''' #include "header.h" int main() { Class c("main"); return 0; } ''', side=r''' #include "header.h" Class::Class(const char *name) { printf("new %s\n", name); } ''', expected=['new main\n']) def test_dylink_global_var(self): self.dylink_test(main=r''' #include <stdio.h> extern int x; int main() { printf("extern is %d.\n", x); return 0; } ''', side=r''' int x = 123; ''', expected=['extern is 123.\n']) def test_dylink_global_var_modded(self): self.dylink_test(main=r''' #include <stdio.h> extern int x; int main() { printf("extern is %d.\n", x); return 0; } ''', side=r''' int x = 123; struct Initter { Initter() { x = 456; } }; Initter initter; ''', expected=['extern is 456.\n']) def test_dylink_mallocs(self): self.dylink_test(header=r''' #include <stdlib.h> #include <string.h> char *side(const char *data); ''', main=r''' #include <stdio.h> #include "header.h" int main() { char *temp = side("hello through side\n"); char *ret = (char*)malloc(strlen(temp)+1); strcpy(ret, temp); temp[1] = 'x'; puts(ret); return 0; } ''', side=r''' #include "header.h" char *side(const char *data) { char *ret = (char*)malloc(strlen(data)+1); strcpy(ret, data); return ret; } ''', expected=['hello through side\n']) def test_dylink_jslib(self): open('lib.js', 'w').write(r''' mergeInto(LibraryManager.library, { test_lib_func: function(x) { return x + 17.2; } }); ''') self.dylink_test(header=r''' extern "C" { extern double test_lib_func(int input); } ''', main=r''' #include <stdio.h> #include "header.h" extern double sidey(); int main2() { return 11; } int main() { int input = sidey(); double temp = test_lib_func(input); printf("other says %.2f\n", temp); printf("more: %.5f, %d\n", temp, input); return 0; } ''', side=r''' #include <stdio.h> #include "header.h" extern int main2(); double sidey() { int temp = main2(); printf("main2 sed: %d\n", temp); printf("main2 sed: %u, %c\n", temp, temp/2); return test_lib_func(temp); } ''', expected='other says 45.2', main_emcc_args=['--js-library', 'lib.js']) def test_dylink_global_var_jslib(self): open('lib.js', 'w').write(r''' mergeInto(LibraryManager.library, { jslib_x: 'allocate(1, "i32*", ALLOC_STATIC)', jslib_x__postset: 'HEAP32[_jslib_x>>2] = 148;', }); ''') self.dylink_test(main=r''' #include <stdio.h> extern "C" int jslib_x; extern void call_side(); int main() { printf("main: jslib_x is %d.\n", jslib_x); call_side(); return 0; } ''', side=r''' 
#include <stdio.h> extern "C" int jslib_x; void call_side() { printf("side: jslib_x is %d.\n", jslib_x); } ''', expected=['main: jslib_x is 148.\nside: jslib_x is 148.\n'], main_emcc_args=['--js-library', 'lib.js']) def test_dylink_syslibs(self): # one module uses libcextra, need to force its inclusion when it isn't the main def test(syslibs, expect_pass=True): print 'syslibs', syslibs, Settings.ASSERTIONS passed = True try: os.environ['EMCC_FORCE_STDLIBS'] = syslibs self.dylink_test(header=r''' #include <string.h> int side(); ''', main=r''' #include <stdio.h> #include <wchar.h> #include "header.h" int main() { printf("|%d|\n", side()); wprintf (L"Characters: %lc %lc\n", L'a', 65); return 0; } ''', side=r''' #include <stdlib.h> #include <malloc.h> #include "header.h" int side() { struct mallinfo m = mallinfo(); return m.arena > 1; } ''', expected=['|1|\nCharacters: a A\n']) except Exception, e: if expect_pass: raise e print '(seeing expected fail)' passed = False assertion = 'build the MAIN_MODULE with EMCC_FORCE_STDLIBS=1 in the environment' if Settings.ASSERTIONS: self.assertContained(assertion, str(e)) else: self.assertNotContained(assertion, str(e)) finally: del os.environ['EMCC_FORCE_STDLIBS'] assert passed == expect_pass test('libc,libcextra') test('1') if 'ASSERTIONS=1' not in self.emcc_args: Settings.ASSERTIONS = 0 test('', expect_pass=False) else: print '(skip ASSERTIONS == 0 part)' Settings.ASSERTIONS = 1 test('', expect_pass=False) def test_dylink_iostream(self): try: os.environ['EMCC_FORCE_STDLIBS'] = 'libcxx' self.dylink_test(header=r''' #include <iostream> #include <string> std::string side(); ''', main=r''' #include "header.h" int main() { std::cout << "hello from main " << side() << std::endl; return 0; } ''', side=r''' #include "header.h" std::string side() { return "and hello from side"; } ''', expected=['hello from main and hello from side\n']) finally: del os.environ['EMCC_FORCE_STDLIBS'] def test_dylink_dynamic_cast(self): # issue 3465 self.dylink_test(header=r''' class Base { public: virtual void printName(); }; class Derived : public Base { public: void printName(); }; ''', main=r''' #include "header.h" #include <iostream> using namespace std; int main() { cout << "starting main" << endl; Base *base = new Base(); Base *derived = new Derived(); base->printName(); derived->printName(); if (dynamic_cast<Derived*>(derived)) { cout << "OK" << endl; } else { cout << "KO" << endl; } return 0; } ''', side=r''' #include "header.h" #include <iostream> using namespace std; void Base::printName() { cout << "Base" << endl; } void Derived::printName() { cout << "Derived" << endl; } ''', expected=['starting main\nBase\nDerived\nOK']) def test_dylink_hyper_dupe(self): Settings.TOTAL_MEMORY = 64*1024*1024 if Settings.ASSERTIONS: self.emcc_args += ['-s', 'ASSERTIONS=2'] # test hyper-dynamic linking, and test duplicate warnings open('third.cpp', 'w').write(r''' int sidef() { return 36; } int sideg = 49; int bsidef() { return 536; } ''') Popen([PYTHON, EMCC, 'third.cpp', '-s', 'SIDE_MODULE=1'] + Building.COMPILER_TEST_OPTS + self.emcc_args + ['-o', 'third.js']).communicate() self.dylink_test(main=r''' #include <stdio.h> #include <emscripten.h> extern int sidef(); extern int sideg; extern int bsidef(); extern int bsideg; int main() { EM_ASM({ Runtime.loadDynamicLibrary('third.js'); // hyper-dynamic! 
works at least for functions (and consts not used in same block) }); printf("sidef: %d, sideg: %d.\n", sidef(), sideg); printf("bsidef: %d.\n", bsidef()); } ''', side=r''' int sidef() { return 10; } // third.js will try to override these, but fail! int sideg = 20; ''', expected=['sidef: 10, sideg: 20.\nbsidef: 536.\n']) if Settings.ASSERTIONS: print 'check warnings' full = run_js('src.cpp.o.js', engine=JS_ENGINES[0], full_output=True, stderr=STDOUT) #self.assertContained("warning: trying to dynamically load symbol '__Z5sidefv' (from 'third.js') that already exists", full) self.assertContained("warning: trying to dynamically load symbol '_sideg' (from 'third.js') that already exists", full) def test_dylink_dot_a(self): # .a linking must force all .o files inside it, when in a shared module open('third.cpp', 'w').write(r''' int sidef() { return 36; } ''') Popen([PYTHON, EMCC, 'third.cpp'] + Building.COMPILER_TEST_OPTS + self.emcc_args + ['-o', 'third.o', '-c']).communicate() open('fourth.cpp', 'w').write(r''' int sideg() { return 17; } ''') Popen([PYTHON, EMCC, 'fourth.cpp'] + Building.COMPILER_TEST_OPTS + self.emcc_args + ['-o', 'fourth.o', '-c']).communicate() Popen([PYTHON, EMAR, 'rc', 'libfourth.a', 'fourth.o']).communicate() self.dylink_test(main=r''' #include <stdio.h> #include <emscripten.h> extern int sidef(); extern int sideg(); int main() { printf("sidef: %d, sideg: %d.\n", sidef(), sideg()); } ''', side=['libfourth.a', 'third.o'], # contents of libtwo.a must be included, even if they aren't referred to! expected=['sidef: 36, sideg: 17.\n']) def test_dylink_spaghetti(self): self.dylink_test(main=r''' #include <stdio.h> int main_x = 72; extern int side_x; int adjust = side_x + 10; int *ptr = &side_x; struct Class { Class() { printf("main init sees %d, %d, %d.\n", adjust, *ptr, main_x); } }; Class cm; int main() { printf("main main sees %d, %d, %d.\n", adjust, *ptr, main_x); return 0; } ''', side=r''' #include <stdio.h> extern int main_x; int side_x = -534; int adjust2 = main_x + 10; int *ptr2 = &main_x; struct Class { Class() { printf("side init sees %d, %d, %d.\n", adjust2, *ptr2, side_x); } }; Class cs; ''', expected=['side init sees 82, 72, -534.\nmain init sees -524, -534, 72.\nmain main sees -524, -534, 72.', 'main init sees -524, -534, 72.\nside init sees 82, 72, -534.\nmain main sees -524, -534, 72.']) def test_dylink_zlib(self): Building.COMPILER_TEST_OPTS += ['-I' + path_from_root('tests', 'zlib')] Popen([PYTHON, path_from_root('embuilder.py'), 'build' ,'zlib']).communicate() zlib = Cache.get_path(os.path.join('ports-builds', 'zlib', 'libz.a')) try: os.environ['EMCC_FORCE_STDLIBS'] = 'libcextra' side = [zlib] self.dylink_test(main=open(path_from_root('tests', 'zlib', 'example.c'), 'r').read(), side=side, expected=open(path_from_root('tests', 'zlib', 'ref.txt'), 'r').read(), force_c=True) finally: del os.environ['EMCC_FORCE_STDLIBS'] #def test_dylink_bullet(self): # Building.COMPILER_TEST_OPTS += ['-I' + path_from_root('tests', 'bullet', 'src')] # side = get_bullet_library(self, True) # self.dylink_test(main=open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp'), 'r').read(), # side=side, # expected=[open(path_from_root('tests', 'bullet', 'output.txt'), 'r').read(), # different roundings # open(path_from_root('tests', 'bullet', 'output2.txt'), 'r').read(), # open(path_from_root('tests', 'bullet', 'output3.txt'), 'r').read()]) def test_random(self): src = r'''#include <stdlib.h> #include <stdio.h> int main() { srandom(0xdeadbeef); printf("%ld", 
random()); } ''' self.do_run(src, '956867869') def test_rand(self): src = r'''#include <stdlib.h> #include <stdio.h> #include <assert.h> int main() { // we need RAND_MAX to be a bitmask (power of 2 minus 1). this assertions guarantees // if RAND_MAX changes the test failure will focus attention on that issue here. assert(RAND_MAX == 0x7fffffff); srand(0xdeadbeef); for(int i = 0; i < 10; ++i) printf("%d\n", rand()); unsigned int seed = 0xdeadbeef; for(int i = 0; i < 10; ++i) printf("%d\n", rand_r(&seed)); bool haveEvenAndOdd = true; for(int i = 1; i <= 30; ++i) { int mask = 1 << i; if (mask > RAND_MAX) break; bool haveEven = false; bool haveOdd = false; for(int j = 0; j < 1000 && (!haveEven || !haveOdd); ++j) { if ((rand() & mask) == 0) haveEven = true; else haveOdd = true; } haveEvenAndOdd = haveEvenAndOdd && haveEven && haveOdd; } if (haveEvenAndOdd) printf("Have even and odd!\n"); return 0; } ''' expected = '''490242850 2074599277 1480056542 1912638067 931112055 2110392489 2053422194 1614832492 216117595 174823244 760368382 602359081 1121118963 1291018924 1608306807 352705809 958258461 1182561381 114276303 1481323674 Have even and odd! ''' self.do_run(src, expected) def test_strtod(self): src = r''' #include <stdio.h> #include <stdlib.h> int main() { char* endptr; printf("\n"); printf("%g\n", strtod("0", &endptr)); printf("%g\n", strtod("0.", &endptr)); printf("%g\n", strtod("0.0", &endptr)); printf("%g\n", strtod("-0.0", &endptr)); printf("%g\n", strtod("1", &endptr)); printf("%g\n", strtod("1.", &endptr)); printf("%g\n", strtod("1.0", &endptr)); printf("%g\n", strtod("z1.0", &endptr)); printf("%g\n", strtod("0.5", &endptr)); printf("%g\n", strtod(".5", &endptr)); printf("%g\n", strtod(".a5", &endptr)); printf("%g\n", strtod("123", &endptr)); printf("%g\n", strtod("123.456", &endptr)); printf("%g\n", strtod("-123.456", &endptr)); printf("%g\n", strtod("1234567891234567890", &endptr)); printf("%g\n", strtod("1234567891234567890e+50", &endptr)); printf("%g\n", strtod("84e+220", &endptr)); printf("%g\n", strtod("123e-50", &endptr)); printf("%g\n", strtod("123e-250", &endptr)); printf("%g\n", strtod("123e-450", &endptr)); printf("%g\n", strtod("0x6", &endptr)); printf("%g\n", strtod("-0x0p+0", &endptr)); char str[] = " 12.34e56end"; printf("%g\n", strtod(str, &endptr)); printf("%d\n", endptr - str); printf("%g\n", strtod("84e+420", &endptr)); printf("%.12f\n", strtod("1.2345678900000000e+08", NULL)); return 0; } ''' expected = ''' 0 0 0 -0 1 1 1 0 0.5 0.5 0 123 123.456 -123.456 1.23457e+18 1.23457e+68 8.4e+221 1.23e-48 1.23e-248 0 6 -0 1.234e+57 10 inf 123456789.000000000000 ''' self.do_run(src, re.sub(r'\n\s+', '\n', expected)) self.do_run(src.replace('strtod', 'strtold'), re.sub(r'\n\s+', '\n', expected)) # XXX add real support for long double def test_strtok(self): test_path = path_from_root('tests', 'core', 'test_strtok') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_parseInt(self): src = open(path_from_root('tests', 'parseInt', 'src.c'), 'r').read() expected = open(path_from_root('tests', 'parseInt', 'output.txt'), 'r').read() self.do_run(src, expected) def test_transtrcase(self): test_path = path_from_root('tests', 'core', 'test_transtrcase') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_printf(self): self.banned_js_engines = [NODE_JS, V8_ENGINE] # SpiderMonkey and V8 do different things to float64 typed arrays, un-NaNing, etc. 
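# Editor's note: JS engines may canonicalize NaN bit patterns when doubles round-trip
# through typed arrays, so this test accepts either of two reference outputs
# (output.txt and output_i64_1.txt); the do_run call below passes both as
# acceptable alternatives.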
src = open(path_from_root('tests', 'printf', 'test.c'), 'r').read() expected = [open(path_from_root('tests', 'printf', 'output.txt'), 'r').read(), open(path_from_root('tests', 'printf', 'output_i64_1.txt'), 'r').read()] self.do_run(src, expected) def test_printf_2(self): test_path = path_from_root('tests', 'core', 'test_printf_2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_vprintf(self): test_path = path_from_root('tests', 'core', 'test_vprintf') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_vsnprintf(self): test_path = path_from_root('tests', 'core', 'test_vsnprintf') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_printf_more(self): test_path = path_from_root('tests', 'core', 'test_printf_more') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_perrar(self): test_path = path_from_root('tests', 'core', 'test_perrar') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_atoX(self): test_path = path_from_root('tests', 'core', 'test_atoX') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_strstr(self): test_path = path_from_root('tests', 'core', 'test_strstr') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_fnmatch(self): # Run one test without assertions, for additional coverage assert 'asm2m' in test_modes if self.run_name == 'asm2m': i = self.emcc_args.index('ASSERTIONS=1') assert i > 0 and self.emcc_args[i-1] == '-s' self.emcc_args[i] = 'ASSERTIONS=0' print 'flip assertions off' test_path = path_from_root('tests', 'core', 'fnmatch') src, output = (test_path + s for s in ('.c', '.out')) self.do_run_from_file(src, output) def test_sscanf(self): test_path = path_from_root('tests', 'core', 'test_sscanf') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_2(self): # doubles for ftype in ['float', 'double']: src = r''' #include <stdio.h> int main(){ char strval1[] = "1.2345678901"; char strval2[] = "1.23456789e5"; char strval3[] = "1.23456789E5"; char strval4[] = "1.2345678e-5"; char strval5[] = "1.2345678E-5"; double dblval = 1.2345678901; double tstval; sscanf(strval1, "%lf", &tstval); if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); sscanf(strval2, "%lf", &tstval); dblval = 123456.789; if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); sscanf(strval3, "%lf", &tstval); dblval = 123456.789; if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); sscanf(strval4, "%lf", &tstval); dblval = 0.000012345678; if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); sscanf(strval5, "%lf", &tstval); dblval = 0.000012345678; if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval); else printf("Pass: %lf %lf\n", tstval, dblval); return 0; } ''' if ftype == 'float': self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568 Pass: 123456.789063 123456.789063 Pass: 123456.789063 123456.789063 Pass: 0.000012 0.000012 Pass: 
0.000012 0.000012''') else: self.do_run(src, '''Pass: 1.234568 1.234568 Pass: 123456.789000 123456.789000 Pass: 123456.789000 123456.789000 Pass: 0.000012 0.000012 Pass: 0.000012 0.000012''') def test_sscanf_n(self): test_path = path_from_root('tests', 'core', 'test_sscanf_n') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_whitespace(self): test_path = path_from_root('tests', 'core', 'test_sscanf_whitespace') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_other_whitespace(self): Settings.SAFE_HEAP = 0 # use i16s in printf test_path = path_from_root('tests', 'core', 'test_sscanf_other_whitespace') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_3(self): test_path = path_from_root('tests', 'core', 'test_sscanf_3') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_4(self): test_path = path_from_root('tests', 'core', 'test_sscanf_4') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_5(self): test_path = path_from_root('tests', 'core', 'test_sscanf_5') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_6(self): test_path = path_from_root('tests', 'core', 'test_sscanf_6') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_skip(self): test_path = path_from_root('tests', 'core', 'test_sscanf_skip') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_caps(self): test_path = path_from_root('tests', 'core', 'test_sscanf_caps') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_hex(self): test_path = path_from_root('tests', 'core', 'test_sscanf_hex') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_sscanf_float(self): test_path = path_from_root('tests', 'core', 'test_sscanf_float') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_langinfo(self): src = open(path_from_root('tests', 'langinfo', 'test.c'), 'r').read() expected = open(path_from_root('tests', 'langinfo', 'output.txt'), 'r').read() self.do_run(src, expected, extra_emscripten_args=['-H', 'libc/langinfo.h']) def test_files(self): self.banned_js_engines = [SPIDERMONKEY_ENGINE] # closure can generate variables called 'gc', which pick up js shell stuff if '-O2' in self.emcc_args: self.emcc_args += ['--closure', '1'] # Use closure here, to test we don't break FS stuff self.emcc_args = filter(lambda x: x != '-g', self.emcc_args) # ensure we test --closure 1 --memory-init-file 1 (-g would disable closure) post = ''' def process(filename): src = \'\'\' var Module = { 'noFSInit': true, 'preRun': function() { FS.createLazyFile('/', 'test.file', 'test.file', true, false); // Test FS_* exporting Module['FS_createDataFile']('/', 'somefile.binary', [100, 200, 50, 25, 10, 77, 123], true, false); // 200 becomes -56, since signed chars are used in memory var test_files_input = 'hi there!'; var test_files_input_index = 0; FS.init(function() { return test_files_input.charCodeAt(test_files_input_index++) || null; }); } }; \'\'\' + open(filename, 'r').read() open(filename, 'w').write(src) ''' other = open(os.path.join(self.get_dir(), 'test.file'), 'w') 
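# Editor's note: 'test.file' is created on the host disk here so the preRun hook
# injected above can map it into the virtual FS via FS.createLazyFile; 'somefile.binary'
# is instead created directly from a byte array (the 200 reads back as -56 in the
# expected output because the data is stored through signed chars).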
other.write('some data'); other.close() src = open(path_from_root('tests', 'files.cpp'), 'r').read() mem_file = 'src.cpp.o.js.mem' orig_args = self.emcc_args for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]: self.emcc_args = orig_args + mode try_delete(mem_file) self.do_run(src, ('size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\n \ntexte\n', 'size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\ntexte\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n5 bytes to dev/null: 5\nok.\n'), post_build=post, extra_emscripten_args=['-H', 'libc/fcntl.h']) if self.uses_memory_init_file(): assert os.path.exists(mem_file) def test_files_m(self): # Test for Module.stdin etc. post = ''' def process(filename): src = \'\'\' var Module = { data: [10, 20, 40, 30], stdin: function() { return Module.data.pop() || null }, stdout: function(x) { Module.print('got: ' + x) } }; \'\'\' + open(filename, 'r').read() open(filename, 'w').write(src) ''' src = r''' #include <stdio.h> #include <unistd.h> int main () { char c; fprintf(stderr, "isatty? %d,%d,%d\n", isatty(fileno(stdin)), isatty(fileno(stdout)), isatty(fileno(stderr))); while ((c = fgetc(stdin)) != EOF) { putc(c+5, stdout); } return 0; } ''' def clean(out, err): return '\n'.join(filter(lambda line: 'warning' not in line, (out + err).split('\n'))) self.do_run(src, ('got: 35\ngot: 45\ngot: 25\ngot: 15\n \nisatty? 0,0,1\n', 'got: 35\ngot: 45\ngot: 25\ngot: 15\nisatty? 0,0,1\n', 'isatty? 0,0,1\ngot: 35\ngot: 45\ngot: 25\ngot: 15\n'), post_build=post, output_nicerizer=clean) def test_mount(self): src = open(path_from_root('tests', 'fs', 'test_mount.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_fwrite_0(self): test_path = path_from_root('tests', 'core', 'test_fwrite_0') src, output = (test_path + s for s in ('.in', '.out')) orig_args = self.emcc_args for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]: self.emcc_args = orig_args + mode self.do_run_from_file(src, output) def test_fgetc_ungetc(self): self.clear() orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['MEMFS', 'NODEFS']: src = open(path_from_root('tests', 'stdio', 'test_fgetc_ungetc.c'), 'r').read() Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS]) def test_fgetc_unsigned(self): src = r''' #include <stdio.h> int main() { FILE *file = fopen("file_with_byte_234.txt", "rb"); int c = fgetc(file); printf("*%d\n", c); } ''' open('file_with_byte_234.txt', 'wb').write('\xea') self.emcc_args += ['--embed-file', 'file_with_byte_234.txt'] self.do_run(src, '*234\n') def test_fgets_eol(self): src = r''' #include <stdio.h> char buf[32]; int main() { char *r = "SUCCESS"; FILE *f = fopen("eol.txt", "r"); while (fgets(buf, 32, f) != NULL) { if (buf[0] == '\0') { r = "FAIL"; break; } } printf("%s\n", r); fclose(f); return 0; } ''' open('eol.txt', 'wb').write('\n') self.emcc_args += ['--embed-file', 'eol.txt'] self.do_run(src, 'SUCCESS\n') def test_fscanf(self): open(os.path.join(self.get_dir(), 'three_numbers.txt'), 'w').write('''-1 0.1 -.1''') src = r''' #include <stdio.h> #include <assert.h> #include <float.h> int main() { float x = FLT_MAX, y = FLT_MAX, z = FLT_MAX; FILE* fp = fopen("three_numbers.txt", "r"); if (fp) { int 
match = fscanf(fp, " %f %f %f ", &x, &y, &z); printf("match = %d\n", match); printf("x = %0.1f, y = %0.1f, z = %0.1f\n", x, y, z); } else { printf("failed to open three_numbers.txt\n"); } return 0; } ''' self.emcc_args += ['--embed-file', 'three_numbers.txt'] self.do_run(src, 'match = 3\nx = -1.0, y = 0.1, z = -0.1\n') def test_fscanf_2(self): open('a.txt', 'w').write('''1/2/3 4/5/6 7/8/9 ''') self.emcc_args += ['--embed-file', 'a.txt'] self.do_run(r'''#include <cstdio> #include <iostream> using namespace std; int main( int argv, char ** argc ) { cout << "fscanf test" << endl; FILE * file; file = fopen("a.txt", "rb"); int vertexIndex[4]; int normalIndex[4]; int uvIndex[4]; int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d\n", &vertexIndex[0], &uvIndex[0], &normalIndex[0], &vertexIndex [1], &uvIndex[1], &normalIndex[1], &vertexIndex[2], &uvIndex[2], &normalIndex[2], &vertexIndex[3], &uvIndex[3], &normalIndex[3]); cout << matches << endl; return 0; } ''', 'fscanf test\n9\n') def test_fileno(self): open(os.path.join(self.get_dir(), 'empty.txt'), 'w').write('') src = r''' #include <stdio.h> #include <unistd.h> int main() { FILE* fp = fopen("empty.txt", "r"); if (fp) { printf("%d\n", fp); printf("%d\n", fileno(fp)); printf("%d\n", fileno((FILE*)42)); // nonexistent stream } else { printf("failed to open empty.txt\n"); } return 0; } ''' self.emcc_args += ['--embed-file', 'empty.txt'] self.do_run(src, '4\n3\n-1\n') def test_readdir(self): self.banned_js_engines = [V8_ENGINE] # stderr printing limitations in v8 src = open(path_from_root('tests', 'dirent', 'test_readdir.c'), 'r').read() self.do_run(src, '''SIGILL: Illegal instruction success n: 7 name: tmp name: nocanread name: home name: foobar name: dev name: .. name: . ''', force_c=True) def test_readdir_empty(self): src = open(path_from_root('tests', 'dirent', 'test_readdir_empty.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_stat(self): src = open(path_from_root('tests', 'stat', 'test_stat.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_stat_chmod(self): src = open(path_from_root('tests', 'stat', 'test_chmod.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_stat_mknod(self): src = open(path_from_root('tests', 'stat', 'test_mknod.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_fcntl(self): add_pre_run = ''' def process(filename): src = open(filename, 'r').read().replace( '// {{PRE_RUN_ADDITIONS}}', "FS.createDataFile('/', 'test', 'abcdef', true, true);" ) open(filename, 'w').write(src) ''' src = open(path_from_root('tests', 'fcntl', 'src.c'), 'r').read() expected = open(path_from_root('tests', 'fcntl', 'output.txt'), 'r').read() self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h']) def test_fcntl_open(self): src = open(path_from_root('tests', 'fcntl-open', 'src.c'), 'r').read() expected = open(path_from_root('tests', 'fcntl-open', 'output.txt'), 'r').read() self.do_run(src, expected, force_c=True, extra_emscripten_args=['-H', 'libc/fcntl.h']) def test_fcntl_misc(self): add_pre_run = ''' def process(filename): src = open(filename, 'r').read().replace( '// {{PRE_RUN_ADDITIONS}}', "FS.createDataFile('/', 'test', 'abcdef', true, true);" ) open(filename, 'w').write(src) ''' src = open(path_from_root('tests', 'fcntl-misc', 'src.c'), 'r').read() expected = open(path_from_root('tests', 'fcntl-misc', 'output.txt'), 'r').read() self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 
'libc/fcntl.h']) def test_poll(self): add_pre_run = ''' def process(filename): src = open(filename, 'r').read().replace( '// {{PRE_RUN_ADDITIONS}}', \'\'\' var dummy_device = FS.makedev(64, 0); FS.registerDevice(dummy_device, {}); FS.createDataFile('/', 'file', 'abcdef', true, true); FS.mkdev('/device', dummy_device); \'\'\' ) open(filename, 'w').write(src) ''' test_path = path_from_root('tests', 'core', 'test_poll') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h,poll.h']) def test_statvfs(self): test_path = path_from_root('tests', 'core', 'test_statvfs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_libgen(self): test_path = path_from_root('tests', 'core', 'test_libgen') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_utime(self): src = open(path_from_root('tests', 'utime', 'test_utime.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_utf(self): self.banned_js_engines = [SPIDERMONKEY_ENGINE] # only node handles utf well Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc'] test_path = path_from_root('tests', 'core', 'test_utf') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_utf32(self): self.do_run(open(path_from_root('tests', 'utf32.cpp')).read(), 'OK.') self.do_run(open(path_from_root('tests', 'utf32.cpp')).read(), 'OK.', args=['-fshort-wchar']) def test_utf8(self): Building.COMPILER_TEST_OPTS += ['-std=c++11'] self.do_run(open(path_from_root('tests', 'utf8.cpp')).read(), 'OK.') def test_wprintf(self): test_path = path_from_root('tests', 'core', 'test_wprintf') src, output = (test_path + s for s in ('.c', '.out')) orig_args = self.emcc_args for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]: self.emcc_args = orig_args + mode self.do_run_from_file(src, output) def test_direct_string_constant_usage(self): test_path = path_from_root('tests', 'core', 'test_direct_string_constant_usage') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_std_cout_new(self): test_path = path_from_root('tests', 'core', 'test_std_cout_new') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_istream(self): test_path = path_from_root('tests', 'core', 'test_istream') src, output = (test_path + s for s in ('.in', '.out')) for linkable in [0]:#, 1]: print linkable Settings.LINKABLE = linkable # regression check for issue #273 self.do_run_from_file(src, output) def test_fs_base(self): Settings.INCLUDE_FULL_LIBRARY = 1 try: addJS = ''' def process(filename): import tools.shared as shared src = open(filename, 'r').read().replace('FS.init();', '').replace( # Disable normal initialization, replace with ours '// {{PRE_RUN_ADDITIONS}}', open(shared.path_from_root('tests', 'filesystem', 'src.js'), 'r').read()) open(filename, 'w').write(src) ''' src = 'int main() {return 0;}\n' expected = open(path_from_root('tests', 'filesystem', 'output.txt'), 'r').read() self.do_run(src, expected, post_build=addJS, extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h']) finally: Settings.INCLUDE_FULL_LIBRARY = 0 def test_fs_nodefs_rw(self): src = open(path_from_root('tests', 'fs', 'test_nodefs_rw.c'), 'r').read() self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS]) def 
test_fs_trackingdelegate(self): src = path_from_root('tests', 'fs', 'test_trackingdelegate.c') out = path_from_root('tests', 'fs', 'test_trackingdelegate.out') self.do_run_from_file(src, out) def test_fs_writeFile(self): self.emcc_args += ['-s', 'DISABLE_EXCEPTION_CATCHING=1'] # see issue 2334 src = path_from_root('tests', 'fs', 'test_writeFile.cc') out = path_from_root('tests', 'fs', 'test_writeFile.out') self.do_run_from_file(src, out) def test_fs_emptyPath(self): src = path_from_root('tests', 'fs', 'test_emptyPath.c') out = path_from_root('tests', 'fs', 'test_emptyPath.out') self.do_run_from_file(src, out) def test_fs_append(self): src = open(path_from_root('tests', 'fs', 'test_append.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_fs_mmap(self): orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['MEMFS']: src = path_from_root('tests', 'fs', 'test_mmap.c') out = path_from_root('tests', 'fs', 'test_mmap.out') Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run_from_file(src, out) def test_unistd_access(self): self.clear() orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['MEMFS', 'NODEFS']: src = open(path_from_root('tests', 'unistd', 'access.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'access.out'), 'r').read() Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run(src, expected, js_engines=[NODE_JS]) def test_unistd_curdir(self): src = open(path_from_root('tests', 'unistd', 'curdir.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'curdir.out'), 'r').read() self.do_run(src, expected) def test_unistd_close(self): src = open(path_from_root('tests', 'unistd', 'close.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'close.out'), 'r').read() self.do_run(src, expected) def test_unistd_confstr(self): src = open(path_from_root('tests', 'unistd', 'confstr.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'confstr.out'), 'r').read() self.do_run(src, expected, extra_emscripten_args=['-H', 'libc/unistd.h']) def test_unistd_ttyname(self): src = open(path_from_root('tests', 'unistd', 'ttyname.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_unistd_dup(self): src = open(path_from_root('tests', 'unistd', 'dup.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'dup.out'), 'r').read() self.do_run(src, expected) def test_unistd_pathconf(self): src = open(path_from_root('tests', 'unistd', 'pathconf.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'pathconf.out'), 'r').read() self.do_run(src, expected) def test_unistd_truncate(self): self.clear() orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['MEMFS', 'NODEFS']: src = open(path_from_root('tests', 'unistd', 'truncate.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'truncate.out'), 'r').read() Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run(src, expected, js_engines=[NODE_JS]) def test_unistd_swab(self): src = open(path_from_root('tests', 'unistd', 'swab.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'swab.out'), 'r').read() self.do_run(src, expected) def test_unistd_isatty(self): src = open(path_from_root('tests', 'unistd', 'isatty.c'), 'r').read() self.do_run(src, 'success', force_c=True) def test_unistd_sysconf(self): src = open(path_from_root('tests', 'unistd', 'sysconf.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'sysconf.out'), 
'r').read() self.do_run(src, expected) def test_unistd_login(self): src = open(path_from_root('tests', 'unistd', 'login.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'login.out'), 'r').read() self.do_run(src, expected) def test_unistd_unlink(self): self.clear() orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['MEMFS', 'NODEFS']: src = open(path_from_root('tests', 'unistd', 'unlink.c'), 'r').read() Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run(src, 'success', force_c=True, js_engines=[NODE_JS]) def test_unistd_links(self): self.clear() orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['MEMFS', 'NODEFS']: if WINDOWS and fs == 'NODEFS': print >> sys.stderr, 'Skipping NODEFS part of this test for test_unistd_links on Windows, since it would require administrative privileges.' # Also, other detected discrepancies if you do end up running this test on NODEFS: # test expects /, but Windows gives \ as path slashes. # Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows. continue src = open(path_from_root('tests', 'unistd', 'links.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'links.out'), 'r').read() Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run(src, expected, js_engines=[NODE_JS]) def test_unistd_symlink_on_nodefs(self): self.clear() orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['NODEFS']: if WINDOWS and fs == 'NODEFS': print >> sys.stderr, 'Skipping NODEFS part of this test for test_unistd_symlink_on_nodefs on Windows, since it would require administrative privileges.' # Also, other detected discrepancies if you do end up running this test on NODEFS: # test expects /, but Windows gives \ as path slashes. # Calling readlink() on a non-link gives error 22 EINVAL on Unix, but simply error 0 OK on Windows. 
continue src = open(path_from_root('tests', 'unistd', 'symlink_on_nodefs.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'symlink_on_nodefs.out'), 'r').read() Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run(src, expected, js_engines=[NODE_JS]) def test_unistd_sleep(self): src = open(path_from_root('tests', 'unistd', 'sleep.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'sleep.out'), 'r').read() self.do_run(src, expected) def test_unistd_io(self): self.clear() orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['MEMFS', 'NODEFS']: src = open(path_from_root('tests', 'unistd', 'io.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'io.out'), 'r').read() Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run(src, expected, js_engines=[NODE_JS]) def test_unistd_misc(self): orig_compiler_opts = Building.COMPILER_TEST_OPTS[:] for fs in ['MEMFS', 'NODEFS']: src = open(path_from_root('tests', 'unistd', 'misc.c'), 'r').read() expected = open(path_from_root('tests', 'unistd', 'misc.out'), 'r').read() Building.COMPILER_TEST_OPTS = orig_compiler_opts + ['-D' + fs] self.do_run(src, expected, js_engines=[NODE_JS]) def test_posixtime(self): test_path = path_from_root('tests', 'core', 'test_posixtime') src, output = (test_path + s for s in ('.c', '.out')) self.banned_js_engines = [V8_ENGINE] # v8 lacks monotonic time self.do_run_from_file(src, output) if V8_ENGINE in JS_ENGINES: self.banned_js_engines = filter(lambda engine: engine != V8_ENGINE, JS_ENGINES) self.do_run_from_file(src, test_path + '_no_monotonic.out') else: print '(no v8, skipping no-monotonic case)' def test_uname(self): test_path = path_from_root('tests', 'core', 'test_uname') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_env(self): src = open(path_from_root('tests', 'env', 'src.c'), 'r').read() expected = open(path_from_root('tests', 'env', 'output.txt'), 'r').read() self.do_run(src, [ expected.replace('{{{ THIS_PROGRAM }}}', os.path.join(self.get_dir(), 'src.cpp.o.js').replace('\\', '/')), # node, can find itself properly expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8 ]) def test_environ(self): src = open(path_from_root('tests', 'env', 'src-mini.c'), 'r').read() expected = open(path_from_root('tests', 'env', 'output-mini.txt'), 'r').read() self.do_run(src, [ expected.replace('{{{ THIS_PROGRAM }}}', os.path.join(self.get_dir(), 'src.cpp.o.js').replace('\\', '/')), # node, can find itself properly expected.replace('{{{ THIS_PROGRAM }}}', './this.program') # spidermonkey, v8 ]) def test_systypes(self): src = open(path_from_root('tests', 'systypes', 'src.c'), 'r').read() expected = open(path_from_root('tests', 'systypes', 'output.txt'), 'r').read() self.do_run(src, expected) def test_getloadavg(self): test_path = path_from_root('tests', 'core', 'test_getloadavg') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_nl_types(self): test_path = path_from_root('tests', 'core', 'test_nl_types') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_799(self): src = open(path_from_root('tests', '799.cpp'), 'r').read() self.do_run(src, '''Set PORT family: 0, port: 3979 Get PORT family: 0 PORT: 3979 ''') def test_ctype(self): src = open(path_from_root('tests', 'ctype', 'src.c'), 'r').read() expected = open(path_from_root('tests', 'ctype', 'output.txt'), 'r').read() 
self.do_run(src, expected) def test_strcasecmp(self): test_path = path_from_root('tests', 'core', 'test_strcasecmp') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_atomic(self): test_path = path_from_root('tests', 'core', 'test_atomic') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_atomic_cxx(self): test_path = path_from_root('tests', 'core', 'test_atomic_cxx') src, output = (test_path + s for s in ('.cpp', '.txt')) Building.COMPILER_TEST_OPTS += ['-std=c++11'] self.do_run_from_file(src, output) print 'main module' Settings.MAIN_MODULE = 1 self.do_run_from_file(src, output) def test_phiundef(self): test_path = path_from_root('tests', 'core', 'test_phiundef') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_netinet_in(self): src = open(path_from_root('tests', 'netinet', 'in.cpp'), 'r').read() expected = open(path_from_root('tests', 'netinet', 'in.out'), 'r').read() self.do_run(src, expected) # libc++ tests def test_iostream(self): src = ''' #include <iostream> int main() { std::cout << "hello world" << std::endl << 77 << "." << std::endl; return 0; } ''' # FIXME: should not have so many newlines in output here self.do_run(src, 'hello world\n77.\n') def test_stdvec(self): test_path = path_from_root('tests', 'core', 'test_stdvec') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_random_device(self): Building.COMPILER_TEST_OPTS += ['-std=c++11'] test_path = path_from_root('tests', 'core', 'test_random_device') src, output = (test_path + s for s in ('.cpp', '.txt')) self.do_run_from_file(src, output) def test_reinterpreted_ptrs(self): test_path = path_from_root('tests', 'core', 'test_reinterpreted_ptrs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_jansson(self): return self.skip('currently broken') if Settings.SAFE_HEAP: return self.skip('jansson is not safe-heap safe') src = ''' #include <jansson.h> #include <stdio.h> #include <string.h> int main() { const char* jsonString = "{\\"key\\": \\"value\\",\\"array\\": [\\"array_item1\\",\\"array_item2\\",\\"array_item3\\"],\\"dict\\":{\\"number\\": 3,\\"float\\": 2.2}}"; json_error_t error; json_t *root = json_loadb(jsonString, strlen(jsonString), 0, &error); if(!root) { printf("Node `root` is `null`."); return 0; } if(!json_is_object(root)) { printf("Node `root` is no object."); return 0; } printf("%s\\n", json_string_value(json_object_get(root, "key"))); json_t *array = json_object_get(root, "array"); if(!array) { printf("Node `array` is `null`."); return 0; } if(!json_is_array(array)) { printf("Node `array` is no array."); return 0; } for(size_t i=0; i<json_array_size(array); ++i) { json_t *arrayNode = json_array_get(array, i); if(!root || !json_is_string(arrayNode)) return 0; printf("%s\\n", json_string_value(arrayNode)); } json_t *dict = json_object_get(root, "dict"); if(!dict || !json_is_object(dict)) return 0; json_t *numberNode = json_object_get(dict, "number"); json_t *floatNode = json_object_get(dict, "float"); if(!numberNode || !json_is_number(numberNode) || !floatNode || !json_is_real(floatNode)) return 0; printf("%i\\n", json_integer_value(numberNode)); printf("%.2f\\n", json_number_value(numberNode)); printf("%.2f\\n", json_real_value(floatNode)); json_t *invalidNode = json_object_get(dict, "invalidNode"); if(invalidNode) return 0; printf("%i\\n", json_number_value(invalidNode)); 
json_decref(root); if(!json_is_object(root)) printf("jansson!\\n"); return 0; } ''' self.do_run(src, 'value\narray_item1\narray_item2\narray_item3\n3\n3.00\n2.20\nJansson: Node with ID `0` not found. Context has `10` nodes.\n0\nJansson: No JSON context.\njansson!') def test_js_libraries(self): open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(''' #include <stdio.h> extern "C" { extern void printey(); extern int calcey(int x, int y); } int main() { printey(); printf("*%d*\\n", calcey(10, 22)); return 0; } ''') open(os.path.join(self.get_dir(), 'mylib1.js'), 'w').write(''' mergeInto(LibraryManager.library, { printey: function() { Module.print('hello from lib!'); } }); ''') open(os.path.join(self.get_dir(), 'mylib2.js'), 'w').write(''' mergeInto(LibraryManager.library, { calcey: function(x, y) { return x + y; } }); ''') self.emcc_args += ['--js-library', os.path.join(self.get_dir(), 'mylib1.js'), '--js-library', os.path.join(self.get_dir(), 'mylib2.js')] self.do_run(open(os.path.join(self.get_dir(), 'main.cpp'), 'r').read(), 'hello from lib!\n*32*\n') def test_unicode_js_library(self): open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(''' #include <stdio.h> extern "C" { extern void printey(); } int main() { printey(); return 0; } ''') self.emcc_args += ['--js-library', path_from_root('tests', 'unicode_library.js')] self.do_run(open(os.path.join(self.get_dir(), 'main.cpp'), 'r').read(), u'Unicode snowman \u2603 says hello!') def test_constglobalunion(self): self.emcc_args += ['-s', 'EXPORT_ALL=1'] self.do_run(r''' #include <stdio.h> struct one_const { long a; }; struct two_consts { long a; long b; }; union some_consts { struct one_const one; struct two_consts two; }; union some_consts my_consts = {{ 1 }}; struct one_const addr_of_my_consts = { (long)(&my_consts) }; int main(void) { printf("%li\n", !!addr_of_my_consts.a); return 0; } ''', '1') ### 'Medium' tests def test_fannkuch(self): results = [ (1,0), (2,1), (3,2), (4,4), (5,7), (6,10), (7, 16), (8,22) ] for i, j in results: src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read() self.do_run(src, 'Pfannkuchen(%d) = %d.' % (i,j), [str(i)], no_build=i>1) def test_raytrace(self): # TODO: Should we remove this test? 
return self.skip('Relies on double value rounding, extremely sensitive') src = open(path_from_root('tests', 'raytrace.cpp'), 'r').read().replace('double', 'float') output = open(path_from_root('tests', 'raytrace.ppm'), 'r').read() self.do_run(src, output, ['3', '16'])#, build_ll_hook=self.do_autodebug) def test_fasta(self): results = [ (1,'''GG*ctt**tgagc*'''), (20,'''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tacgtgtagcctagtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaa**tgacgtcttttgatctgacggcgttaacaaagatactctg*'''), (50,'''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA*TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa**NtactMcSMtYtcMgRtacttctWBacgaa**agatactctgggcaacacacatacttctctcatgttgtttcttcggacctttcataacct**ttcctggcacatggttagctgcacatcacaggattgtaagggtctagtggttcagtgagc**ggaatatcattcgtcggtggtgttaatctatctcggtgtagcttataaatgcatccgtaa**gaatattatgtttatttgtcggtacgttcatggtagtggtgtcgccgatttagacgtaaa**ggcatgtatg*''') ] for precision in [0, 1, 2]: Settings.PRECISE_F32 = precision for t in ['float', 'double']: print precision, t src = open(path_from_root('tests', 'fasta.cpp'), 'r').read().replace('double', t) for i, j in results: self.do_run(src, j, [str(i)], lambda x, err: x.replace('\n', '*'), no_build=i>1) shutil.copyfile('src.cpp.o.js', '%d_%s.js' % (precision, t)) def test_whets(self): self.do_run(open(path_from_root('tests', 'whets.cpp')).read(), 'Single Precision C Whetstone Benchmark') def test_dlmalloc(self): self.banned_js_engines = [NODE_JS] # slower, and fail on 64-bit Settings.TOTAL_MEMORY = 128*1024*1024 # needed with typed arrays src = open(path_from_root('system', 'lib', 'dlmalloc.c'), 'r').read() + '\n\n\n' + open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read() self.do_run(src, '*1,0*', ['200', '1']) self.do_run(src, '*400,0*', ['400', '400'], no_build=True) # Linked version src = open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read() self.do_run(src, '*1,0*', ['200', '1'], extra_emscripten_args=['-m']) self.do_run(src, '*400,0*', ['400', '400'], extra_emscripten_args=['-m'], no_build=True) if self.emcc_args == []: # TODO: do this in other passes too, passing their opts into emcc # emcc should build in dlmalloc automatically, and do all the sign correction etc. 
for it try_delete(os.path.join(self.get_dir(), 'src.cpp.o.js')) output = Popen([PYTHON, EMCC, path_from_root('tests', 'dlmalloc_test.c'), '-s', 'TOTAL_MEMORY=' + str(128*1024*1024), '-o', os.path.join(self.get_dir(), 'src.cpp.o.js')], stdout=PIPE, stderr=self.stderr_redirect).communicate() self.do_run('x', '*1,0*', ['200', '1'], no_build=True) self.do_run('x', '*400,0*', ['400', '400'], no_build=True) # The same for new and all its variants src = open(path_from_root('tests', 'new.cpp')).read() for new, delete in [ ('malloc(100)', 'free'), ('new char[100]', 'delete[]'), ('new Structy', 'delete'), ('new int', 'delete'), ('new Structy[10]', 'delete[]'), ]: self.do_run(src.replace('{{{ NEW }}}', new).replace('{{{ DELETE }}}', delete), '*1,0*') def test_dlmalloc_partial(self): # present part of the symbols of dlmalloc, not all src = open(path_from_root('tests', 'new.cpp')).read().replace('{{{ NEW }}}', 'new int').replace('{{{ DELETE }}}', 'delete') + ''' void * operator new(size_t size) { printf("new %d!\\n", size); return malloc(size); } ''' self.do_run(src, 'new 4!\n*1,0*') def test_dlmalloc_partial_2(self): if 'SAFE_HEAP' in str(self.emcc_args): return self.skip('only emcc will link in dlmalloc, and we do unsafe stuff') # present part of the symbols of dlmalloc, not all. malloc is harder to link than new which is weak. test_path = path_from_root('tests', 'core', 'test_dlmalloc_partial_2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_libcxx(self): self.do_run(open(path_from_root('tests', 'hashtest.cpp')).read(), 'june -> 30\nPrevious (in alphabetical order) is july\nNext (in alphabetical order) is march') self.do_run(''' #include <set> #include <stdio.h> int main() { std::set<int> *fetchOriginatorNums = new std::set<int>(); fetchOriginatorNums->insert(171); printf("hello world\\n"); return 0; } ''', 'hello world'); def test_typeid(self): test_path = path_from_root('tests', 'core', 'test_typeid') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_static_variable(self): test_path = path_from_root('tests', 'core', 'test_static_variable') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_fakestat(self): test_path = path_from_root('tests', 'core', 'test_fakestat') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_mmap(self): Settings.TOTAL_MEMORY = 128*1024*1024 test_path = path_from_root('tests', 'core', 'test_mmap') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) self.do_run_from_file(src, output, force_c=True) def test_mmap_file(self): for extra_args in [[], ['--no-heap-copy']]: self.emcc_args += ['--embed-file', 'data.dat'] + extra_args open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' 
* 9000)) src = open(path_from_root('tests', 'mmap_file.c')).read() self.do_run(src, '*\ndata from the file .\nfrom the file ......\n*\n') def test_cubescript(self): assert 'asm3' in test_modes if self.run_name == 'asm3': self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage Building.COMPILER_TEST_OPTS = filter(lambda x: x != '-g', Building.COMPILER_TEST_OPTS) # remove -g, so we have one test without it by default def test(): self.do_run(path_from_root('tests', 'cubescript'), '*\nTemp is 33\n9\n5\nhello, everyone\n*', main_file='command.cpp') test() assert 'asm1' in test_modes if self.run_name == 'asm1': print 'verifying postsets' generated = open('src.cpp.o.js').read() generated = re.sub(r'\n+[ \n]*\n+', '\n', generated) main = generated[generated.find('function runPostSets'):] main = main[:main.find('\n}')] assert main.count('\n') <= 7, ('must not emit too many postSets: %d' % main.count('\n')) + ' : ' + main print 'relocatable' assert Settings.RELOCATABLE == Settings.EMULATED_FUNCTION_POINTERS == 0 Settings.RELOCATABLE = Settings.EMULATED_FUNCTION_POINTERS = 1 test() Settings.RELOCATABLE = Settings.EMULATED_FUNCTION_POINTERS = 0 if self.is_emterpreter(): print 'emterpreter/async/assertions' # extra coverage self.emcc_args += ['-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1'] test() print 'emterpreter/async/assertions/whitelist' self.emcc_args += ['-s', 'EMTERPRETIFY_WHITELIST=["_frexpl"]'] # test double call assertions test() def test_sse1(self): return self.skip('TODO: This test fails due to bugs #2840, #3044, #3045, #3046 and #3048 (also see #3043 and #3049)') Settings.PRECISE_F32 = 1 # SIMD currently requires Math.fround orig_args = self.emcc_args for mode in [[], ['-s', 'SIMD=1']]: self.emcc_args = orig_args + mode self.do_run(open(path_from_root('tests', 'test_sse1.cpp'), 'r').read(), 'Success!') # Tests the full SSE1 API.
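# The _full variant below compiles tests/test_sse1_full.cpp natively with clang and uses the native run's output as the expected result for the JS build.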
def test_sse1_full(self): return self.skip('TODO: This test fails due to bugs #2840, #3044, #3045, #3046 and #3048 (also see #3043 and #3049)') if SPIDERMONKEY_ENGINE not in JS_ENGINES: return self.skip('test_sse1_full requires SpiderMonkey to run.') Popen([CLANG, path_from_root('tests', 'test_sse1_full.cpp'), '-o', 'test_sse1_full'] + get_clang_native_args(), stdout=PIPE, stderr=PIPE).communicate() native_result, err = Popen('./test_sse1_full', stdout=PIPE, stderr=PIPE).communicate() Settings.PRECISE_F32 = 1 # SIMD currently requires Math.fround orig_args = self.emcc_args for mode in [[], ['-s', 'SIMD=1']]: self.emcc_args = orig_args + mode + ['-I' + path_from_root('tests')] self.do_run(open(path_from_root('tests', 'test_sse1_full.cpp'), 'r').read(), native_result) def test_simd(self): if self.is_emterpreter(): return self.skip('todo') test_path = path_from_root('tests', 'core', 'test_simd') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_simd2(self): if self.is_emterpreter(): return self.skip('todo') test_path = path_from_root('tests', 'core', 'test_simd2') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_simd3(self): return self.skip('FIXME: this appears to be broken') test_path = path_from_root('tests', 'core', 'test_simd3') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_simd4(self): # test_simd4 is to test phi node handling of SIMD path if self.is_emterpreter(): return self.skip('todo') test_path = path_from_root('tests', 'core', 'test_simd4') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_simd5(self): # test_simd5 is to test shufflevector of SIMD path test_path = path_from_root('tests', 'core', 'test_simd5') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_simd6(self): # test_simd6 is to test x86 min and max intrinsics on NaN and -0.0 if self.is_emterpreter(): return self.skip('todo') test_path = path_from_root('tests', 'core', 'test_simd6') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_simd7(self): # test_simd7 is to test negative zero handling. 
if self.is_emterpreter(): return self.skip('todo') test_path = path_from_root('tests', 'core', 'test_simd7') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_simd8(self): # test_simd8 is to test unaligned load and store if self.is_emterpreter(): return self.skip('todo') test_path = path_from_root('tests', 'core', 'test_simd8') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_simd_dyncall(self): if self.is_emterpreter(): return self.skip('todo') test_path = path_from_root('tests', 'core', 'test_simd_dyncall') src, output = (test_path + s for s in ('.cpp', '.txt')) self.do_run_from_file(src, output) def test_gcc_unmangler(self): Building.COMPILER_TEST_OPTS += ['-I' + path_from_root('third_party'), '-Wno-warn-absolute-paths'] self.do_run(open(path_from_root('third_party', 'gcc_demangler.c')).read(), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj']) def test_lua(self): if self.emcc_args: self.emcc_args = ['-g1'] + self.emcc_args total_memory = Settings.TOTAL_MEMORY if self.is_emterpreter(): Settings.PRECISE_F32 = 1 for aggro in ([0, 1] if Settings.ASM_JS and '-O2' in self.emcc_args else [0]): Settings.AGGRESSIVE_VARIABLE_ELIMINATION = aggro Settings.TOTAL_MEMORY = total_memory print aggro self.do_run('', 'hello lua world!\n17\n1\n2\n3\n4\n7', args=['-e', '''print("hello lua world!");print(17);for x = 1,4 do print(x) end;print(10-3)'''], libraries=self.get_library('lua', [os.path.join('src', 'lua'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None), includes=[path_from_root('tests', 'lua')], output_nicerizer=lambda string, err: (string + err).replace('\n\n', '\n').replace('\n\n', '\n')) def get_freetype(self): Settings.DEAD_FUNCTIONS += ['_inflateEnd', '_inflate', '_inflateReset', '_inflateInit2_'] return self.get_library('freetype', os.path.join('objs', '.libs', 'libfreetype.a')) def test_freetype(self): assert 'asm2g' in test_modes if self.run_name == 'asm2g': Settings.ALIASING_FUNCTION_POINTERS = 1 - Settings.ALIASING_FUNCTION_POINTERS # flip for some more coverage here post = ''' def process(filename): import tools.shared as shared # Embed the font into the document src = open(filename, 'r').read().replace( '// {{PRE_RUN_ADDITIONS}}', "FS.createDataFile('/', 'font.ttf', %s, true, false);" % str( map(ord, open(shared.path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), 'rb').read()) ) ) open(filename, 'w').write(src) ''' # Not needed for js, but useful for debugging shutil.copyfile(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), os.path.join(self.get_dir(), 'font.ttf')) # Main for outlining in [0, 5000]: Settings.OUTLINING_LIMIT = outlining print >> sys.stderr, 'outlining:', outlining self.do_run(open(path_from_root('tests', 'freetype', 'main.c'), 'r').read(), open(path_from_root('tests', 'freetype', 'ref.txt'), 'r').read(), ['font.ttf', 'test!', '150', '120', '25'], libraries=self.get_freetype(), includes=[path_from_root('tests', 'freetype', 'include')], post_build=post) # github issue 324 print '[issue 324]' self.do_run(open(path_from_root('tests', 'freetype', 'main_2.c'), 'r').read(), open(path_from_root('tests', 'freetype', 'ref_2.txt'), 'r').read(), ['font.ttf', 'w', '32', '32', '25'], libraries=self.get_freetype(), includes=[path_from_root('tests', 'freetype', 'include')], post_build=post) print '[issue 324 case 2]' self.do_run(open(path_from_root('tests', 'freetype', 'main_3.c'), 'r').read(), 
open(path_from_root('tests', 'freetype', 'ref_3.txt'), 'r').read(), ['font.ttf', 'W', '32', '32', '0'], libraries=self.get_freetype(), includes=[path_from_root('tests', 'freetype', 'include')], post_build=post) print '[issue 324 case 3]' self.do_run('', open(path_from_root('tests', 'freetype', 'ref_4.txt'), 'r').read(), ['font.ttf', 'ea', '40', '32', '0'], no_build=True) def test_sqlite(self): # gcc -O3 -I/home/alon/Dev/emscripten/tests/sqlite -ldl src.c self.banned_js_engines = [NODE_JS] # OOM in older node if '-O' not in str(self.emcc_args): self.banned_js_engines += [SPIDERMONKEY_ENGINE] # SM bug 1066759 Settings.DISABLE_EXCEPTION_CATCHING = 1 Settings.EXPORTED_FUNCTIONS += ['_sqlite3_open', '_sqlite3_close', '_sqlite3_exec', '_sqlite3_free', '_callback']; if Settings.ASM_JS == 1 and '-g' in self.emcc_args: print "disabling inlining" # without registerize (which -g disables), we generate huge amounts of code Settings.INLINING_LIMIT = 50 #Settings.OUTLINING_LIMIT = 60000 self.do_run(r''' #define SQLITE_DISABLE_LFS #define LONGDOUBLE_TYPE double #define SQLITE_INT64_TYPE long long int #define SQLITE_THREADSAFE 0 ''' + open(path_from_root('tests', 'sqlite', 'sqlite3.c'), 'r').read() + open(path_from_root('tests', 'sqlite', 'benchmark.c'), 'r').read(), open(path_from_root('tests', 'sqlite', 'benchmark.txt'), 'r').read(), includes=[path_from_root('tests', 'sqlite')], force_c=True) def test_zlib(self): if '-O2' in self.emcc_args and 'ASM_JS=0' not in self.emcc_args: # without asm, closure minifies Math.imul badly self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage assert 'asm2g' in test_modes if self.run_name == 'asm2g': self.emcc_args += ['-g4'] # more source maps coverage use_cmake_configure = WINDOWS if use_cmake_configure: make_args = [] configure = [PYTHON, path_from_root('emcmake'), 'cmake', '.', '-DBUILD_SHARED_LIBS=OFF'] else: make_args = ['libz.a'] configure = ['sh', './configure'] self.do_run(open(path_from_root('tests', 'zlib', 'example.c'), 'r').read(), open(path_from_root('tests', 'zlib', 'ref.txt'), 'r').read(), libraries=self.get_library('zlib', os.path.join('libz.a'), make_args=make_args, configure=configure), includes=[path_from_root('tests', 'zlib'), os.path.join(self.get_dir(), 'building', 'zlib')], force_c=True) def test_the_bullet(self): # Called thus so it runs late in the alphabetical cycle... it is long Settings.DEAD_FUNCTIONS = ['__ZSt9terminatev'] asserts = Settings.ASSERTIONS for use_cmake in [False, True]: # If false, use a configure script to configure Bullet build. print 'cmake', use_cmake # Windows cannot run configure sh scripts. 
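# so the configure-based build is skipped there and only the cmake path is exercised.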
if WINDOWS and not use_cmake: continue Settings.ASSERTIONS = 2 if use_cmake else asserts # extra testing for ASSERTIONS == 2 def test(): self.do_run(open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp'), 'r').read(), [open(path_from_root('tests', 'bullet', 'output.txt'), 'r').read(), # different roundings open(path_from_root('tests', 'bullet', 'output2.txt'), 'r').read(), open(path_from_root('tests', 'bullet', 'output3.txt'), 'r').read()], libraries=get_bullet_library(self, use_cmake), includes=[path_from_root('tests', 'bullet', 'src')]) test() # TODO: test only worked in non-fastcomp (well, this section) continue assert 'asm2g' in test_modes if self.run_name == 'asm2g' and not use_cmake: # Test forced alignment print >> sys.stderr, 'testing FORCE_ALIGNED_MEMORY' old = open('src.cpp.o.js').read() Settings.FORCE_ALIGNED_MEMORY = 1 test() new = open('src.cpp.o.js').read() print len(old), len(new), old.count('tempBigInt'), new.count('tempBigInt') assert len(old) > len(new) assert old.count('tempBigInt') > new.count('tempBigInt') def test_poppler(self): Settings.NO_EXIT_RUNTIME = 1 Building.COMPILER_TEST_OPTS += [ '-I' + path_from_root('tests', 'freetype', 'include'), '-I' + path_from_root('tests', 'poppler', 'include'), '-Wno-warn-absolute-paths' ] Settings.INVOKE_RUN = 0 # We append code that does run() ourselves # See post(), below input_file = open(os.path.join(self.get_dir(), 'paper.pdf.js'), 'w') input_file.write(str(map(ord, open(path_from_root('tests', 'poppler', 'paper.pdf'), 'rb').read()))) input_file.close() post = ''' def process(filename): # To avoid loading this large file to memory and altering it, we simply append to the end src = open(filename, 'a') src.write( \'\'\' FS.createDataFile('/', 'paper.pdf', eval(Module.read('paper.pdf.js')), true, false); Module.callMain(Module.arguments); Module.print("Data: " + JSON.stringify(MEMFS.getFileDataAsRegularArray(FS.root.contents['filename-1.ppm']).map(function(x) { return unSign(x, 8) }))); \'\'\' ) src.close() ''' #fontconfig = self.get_library('fontconfig', [os.path.join('src', '.libs', 'libfontconfig.a')]) # Used in file, but not needed, mostly freetype = self.get_freetype() poppler = self.get_library('poppler', [os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')], env_init={ 'FONTCONFIG_CFLAGS': ' ', 'FONTCONFIG_LIBS': ' ' }, configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--enable-shared=no']) # Combine libraries combined = os.path.join(self.get_dir(), 'poppler-combined.bc') Building.link(poppler + freetype, combined) self.do_ll_run(combined, map(ord, open(path_from_root('tests', 'poppler', 'ref.ppm'), 'r').read()).__str__().replace(' ', ''), args='-scale-to 512 paper.pdf filename'.split(' '), post_build=post) #, build_ll_hook=self.do_autodebug) def test_openjpeg(self): Building.COMPILER_TEST_OPTS = filter(lambda x: x != '-g', Building.COMPILER_TEST_OPTS) # remove -g, so we have one test without it by default post = ''' def process(filename): import tools.shared as shared original_j2k = shared.path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.j2k') src = open(filename, 'r').read().replace( '// {{PRE_RUN_ADDITIONS}}', "FS.createDataFile('/', 'image.j2k', %s, true, false);" % shared.line_splitter(str( map(ord, open(original_j2k, 'rb').read()) )) ).replace( '// {{POST_RUN_ADDITIONS}}', 
"Module.print('Data: ' + JSON.stringify(MEMFS.getFileDataAsRegularArray(FS.analyzePath('image.raw').object)));" ) open(filename, 'w').write(src) ''' shutil.copy(path_from_root('tests', 'openjpeg', 'opj_config.h'), self.get_dir()) lib = self.get_library('openjpeg', [os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/index.c.o'.split('/')), os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/convert.c.o'.split('/')), os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/color.c.o'.split('/')), os.path.join('bin', 'libopenjpeg.so.1.4.0')], configure=['cmake', '.'], #configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'], make_args=[]) # no -j 2, since parallel builds can fail # We use doubles in JS, so we get slightly different values than native code. So we # check our output by comparing the average pixel difference def image_compare(output, err): # Get the image generated by JS, from the JSON.stringify'd array m = re.search('\[[\d, -]*\]', output) try: js_data = eval(m.group(0)) except AttributeError: print 'Failed to find proper image output in: ' + output raise js_data = map(lambda x: x if x >= 0 else 256+x, js_data) # Our output may be signed, so unsign it # Get the correct output true_data = open(path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.raw'), 'rb').read() # Compare them assert(len(js_data) == len(true_data)) num = len(js_data) diff_total = js_total = true_total = 0 for i in range(num): js_total += js_data[i] true_total += ord(true_data[i]) diff_total += abs(js_data[i] - ord(true_data[i])) js_mean = js_total/float(num) true_mean = true_total/float(num) diff_mean = diff_total/float(num) image_mean = 83.265 #print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']' assert abs(js_mean - image_mean) < 0.01 assert abs(true_mean - image_mean) < 0.01 assert diff_mean < 0.01 return output self.emcc_args += ['--minify', '0'] # to compare the versions Settings.NO_EXIT_RUNTIME = 1 def do_test(): self.do_run(open(path_from_root('tests', 'openjpeg', 'codec', 'j2k_to_image.c'), 'r').read(), 'Successfully generated', # The real test for valid output is in image_compare '-i image.j2k -o image.raw'.split(' '), libraries=lib, includes=[path_from_root('tests', 'openjpeg', 'libopenjpeg'), path_from_root('tests', 'openjpeg', 'codec'), path_from_root('tests', 'openjpeg', 'common'), os.path.join(self.get_build_dir(), 'openjpeg')], force_c=True, post_build=post, output_nicerizer=image_compare)#, build_ll_hook=self.do_autodebug) do_test() # some test coverage for EMCC_DEBUG 1 and 2 assert 'asm2g' in test_modes if self.run_name == 'asm2g': shutil.copyfile('src.c.o.js', 'release.js') try: os.environ['EMCC_DEBUG'] = '1' print '2' do_test() shutil.copyfile('src.c.o.js', 'debug1.js') os.environ['EMCC_DEBUG'] = '2' print '3' do_test() shutil.copyfile('src.c.o.js', 'debug2.js') finally: del os.environ['EMCC_DEBUG'] for debug in [1,2]: def clean(text): text = text.replace('\n\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n').replace('{\n}', '{}') return '\n'.join(sorted(text.split('\n'))) sizes = len(open('release.js').read()), len(open('debug%d.js' % debug).read()) print >> sys.stderr, debug, 'sizes', sizes assert abs(sizes[0] - sizes[1]) < 0.001*sizes[0], sizes # we can't check on identical output, compilation is not 100% deterministic (order of switch elements, etc.), but size should be ~identical print >> sys.stderr, 'debug check %d passed too' % debug try: os.environ['EMCC_FORCE_STDLIBS'] = '1' print 
'EMCC_FORCE_STDLIBS' do_test() finally: del os.environ['EMCC_FORCE_STDLIBS'] print >> sys.stderr, 'EMCC_FORCE_STDLIBS ok' try_delete(CANONICAL_TEMP_DIR) else: print >> sys.stderr, 'not doing debug check' if Settings.ALLOW_MEMORY_GROWTH == 1: # extra testing print >> sys.stderr, 'no memory growth' Settings.ALLOW_MEMORY_GROWTH = 0 do_test() def test_python(self): Settings.EMULATE_FUNCTION_POINTER_CASTS = 1 bitcode = path_from_root('tests', 'python', 'python.bc') pyscript = dedent('''\ print '***' print "hello python world!" print [x*2 for x in range(4)] t=2 print 10-3-t print (lambda x: x*2)(11) print '%f' % 5.47 print {1: 2}.keys() print '***' ''') pyoutput = '***\nhello python world!\n[0, 2, 4, 6]\n5\n22\n5.470000\n[1]\n***' for lto in [0, 1]: print 'lto:', lto if lto == 1: self.emcc_args += ['--llvm-lto', '1'] self.do_ll_run(bitcode, pyoutput, args=['-S', '-c', pyscript]) def test_lifetime(self): self.do_ll_run(path_from_root('tests', 'lifetime.ll'), 'hello, world!\n') if '-O1' in self.emcc_args or '-O2' in self.emcc_args: assert 'a18' not in open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read(), 'lifetime stuff and their vars must be culled' # Test cases in separate files. Note that these files may contain invalid .ll! # They are only valid enough for us to read for test purposes, not for llvm-as # to process. def test_cases(self): if Building.LLVM_OPTS: return self.skip("Our code is not exactly 'normal' llvm assembly") emcc_args = self.emcc_args # The following tests link to libc, and must be run with EMCC_LEAVE_INPUTS_RAW = 0 need_no_leave_inputs_raw = ['muli33_ta2', 'philoop_ta2'] try: for name in glob.glob(path_from_root('tests', 'cases', '*.ll')): shortname = name.replace('.ll', '') if '' not in shortname: continue # TODO: test only worked in non-fastcomp (well, these cases) if os.path.basename(shortname) in [ 'aliasbitcast', 'structparam', 'issue_39', 'phinonexist', 'oob_ta2', 'phiself', 'invokebitcast', # invalid ir 'structphiparam', 'callwithstructural_ta2', 'callwithstructural64_ta2', 'structinparam', # pnacl limitations in ExpandStructRegs '2xi40', # pnacl limitations in ExpandGetElementPtr 'quoted', # current fastcomp limitations FIXME 'atomicrmw_unaligned', # TODO XXX ]: continue if self.is_emterpreter() and os.path.basename(shortname) in ['funcptr']: continue # test writes to memory we store out bytecode! test is invalid if os.path.basename(shortname) in need_no_leave_inputs_raw: if 'EMCC_LEAVE_INPUTS_RAW' in os.environ: del os.environ['EMCC_LEAVE_INPUTS_RAW'] else: os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1' if '_noasm' in shortname and Settings.ASM_JS: print self.skip('case "%s" not relevant for asm.js' % shortname) continue self.emcc_args = emcc_args if os.path.exists(shortname + '.emcc'): if not self.emcc_args: continue self.emcc_args = self.emcc_args + json.loads(open(shortname + '.emcc').read()) print >> sys.stderr, "Testing case '%s'..." % shortname output_file = path_from_root('tests', 'cases', shortname + '.txt') if os.path.exists(output_file): output = open(output_file, 'r').read() else: output = 'hello, world!' 
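# A cases/<name>.txt file whose content is just 'skip' disables that case without deleting it.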
if output.rstrip() != 'skip': self.do_ll_run(path_from_root('tests', 'cases', name), output) # Optional source checking, a python script that gets a global generated with the source src_checker = path_from_root('tests', 'cases', shortname + '.py') if os.path.exists(src_checker): generated = open('src.cpp.o.js').read() exec(open(src_checker).read()) finally: if 'EMCC_LEAVE_INPUTS_RAW' in os.environ: del os.environ['EMCC_LEAVE_INPUTS_RAW'] self.emcc_args = emcc_args def test_fuzz(self): Building.COMPILER_TEST_OPTS += ['-I' + path_from_root('tests', 'fuzz', 'include'), '-Wno-warn-absolute-paths'] def run_all(x): print x for name in glob.glob(path_from_root('tests', 'fuzz', '*.c')) + glob.glob(path_from_root('tests', 'fuzz', '*.cpp')): #if os.path.basename(name) != '4.c': continue if 'newfail' in name: continue if os.path.basename(name).startswith('temp_fuzzcode'): continue if x == 'lto' and self.run_name in ['default', 'asm2f'] and os.path.basename(name) in [ '8.c' # pnacl legalization issue, see https://code.google.com/p/nativeclient/issues/detail?id=4027 ]: continue if x == 'lto' and self.run_name == 'default' and os.path.basename(name) in [ '19.c', '18.cpp', # LLVM LTO bug ]: continue if x == 'lto' and os.path.basename(name) in [ '21.c' ]: continue # LLVM LTO bug print name self.do_run(open(path_from_root('tests', 'fuzz', name)).read(), open(path_from_root('tests', 'fuzz', name + '.txt')).read(), force_c=name.endswith('.c')) run_all('normal') self.emcc_args += ['--llvm-lto', '1'] run_all('lto') # Autodebug the code def do_autodebug(self, filename): Building.llvm_dis(filename) output = Popen([PYTHON, AUTODEBUGGER, filename+'.o.ll', filename+'.o.ll.ll'], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0] assert 'Success.' in output, output self.prep_ll_run(filename, filename+'.o.ll.ll', force_recompile=True) # rebuild .bc # TODO: use code in do_autodebug_post for this # Autodebug the code, after LLVM opts. Will only work once! def do_autodebug_post(self, filename): if not hasattr(self, 'post'): print 'Asking for post re-call' self.post = True return True print 'Autodebugging during post time' delattr(self, 'post') output = Popen([PYTHON, AUTODEBUGGER, filename+'.o.ll', filename+'.o.ll.ll'], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0] assert 'Success.' 
in output, output shutil.copyfile(filename + '.o.ll.ll', filename + '.o.ll') Building.llvm_as(filename) Building.llvm_dis(filename) def test_autodebug(self): if Building.LLVM_OPTS: return self.skip('LLVM opts mess us up') Building.COMPILER_TEST_OPTS += ['--llvm-opts', '0'] # Run a test that should work, generating some code test_path = path_from_root('tests', 'core', 'test_structs') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, build_ll_hook=lambda x: False) # add an ll hook, to force ll generation filename = os.path.join(self.get_dir(), 'src.cpp') self.do_autodebug(filename) # Compare to each other, and to expected output self.do_ll_run(path_from_root('tests', filename+'.o.ll.ll'), '''AD:-1,1''') assert open('stdout').read().startswith('AD:-1'), 'We must note when we enter functions' # Test using build_ll_hook src = ''' #include <stdio.h> char cache[256], *next = cache; int main() { cache[10] = 25; next[20] = 51; int x = cache[10]; double y = 11.52; printf("*%d,%d,%.2f*\\n", x, cache[20], y); return 0; } ''' self.do_run(src, '''AD:-1,1''', build_ll_hook=self.do_autodebug) ### Integration tests def test_ccall(self): post = ''' def process(filename): src = \'\'\' var Module = { 'noInitialRun': true }; \'\'\' + open(filename, 'r').read() + \'\'\' Module.addOnExit(function () { Module.print('*'); var ret; ret = Module['ccall']('get_int', 'number'); Module.print([typeof ret, ret]); ret = ccall('get_float', 'number'); Module.print([typeof ret, ret.toFixed(2)]); ret = ccall('get_string', 'string'); Module.print([typeof ret, ret]); ret = ccall('print_int', null, ['number'], [12]); Module.print(typeof ret); ret = ccall('print_float', null, ['number'], [14.56]); Module.print(typeof ret); ret = ccall('print_string', null, ['string'], ["cheez"]); Module.print(typeof ret); ret = ccall('print_string', null, ['array'], [[97, 114, 114, 45, 97, 121, 0]]); Module.print(typeof ret); ret = ccall('multi', 'number', ['number', 'number', 'number', 'string'], [2, 1.4, 3, 'more']); Module.print([typeof ret, ret]); var p = ccall('malloc', 'pointer', ['number'], [4]); setValue(p, 650, 'i32'); ret = ccall('pointer', 'pointer', ['pointer'], [p]); Module.print([typeof ret, getValue(ret, 'i32')]); Module.print('*'); // part 2: cwrap var noThirdParam = Module['cwrap']('get_int', 'number'); Module.print(noThirdParam()); var multi = Module['cwrap']('multi', 'number', ['number', 'number', 'number', 'string']); Module.print(multi(2, 1.4, 3, 'atr')); Module.print(multi(8, 5.4, 4, 'bret')); Module.print('*'); // part 3: avoid stack explosion and check it's restored correctly for (var i = 0; i < TOTAL_STACK/60; i++) { ccall('multi', 'number', ['number', 'number', 'number', 'string'], [0, 0, 0, '123456789012345678901234567890123456789012345678901234567890']); } Module.print('stack is ok.'); ccall('call_ccall_again', null); }); Module.callMain(); \'\'\' open(filename, 'w').write(src) ''' Settings.EXPORTED_FUNCTIONS += ['_get_int', '_get_float', '_get_string', '_print_int', '_print_float', '_print_string', '_multi', '_pointer', '_call_ccall_again', '_malloc'] test_path = path_from_root('tests', 'core', 'test_ccall') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output, post_build=post) if '-O2' in self.emcc_args or self.is_emterpreter(): print 'with closure' self.emcc_args += ['--closure', '1'] self.do_run_from_file(src, output, post_build=post) def test_dead_functions(self): src = r''' #include <stdio.h> extern "C" { __attribute__((noinline)) int 
unused(int x) { return x; } } int main(int argc, char **argv) { printf("*%d*\n", argc > 1 ? unused(1) : 2); return 0; } ''' def test(expected, args=[], no_build=False): self.do_run(src, expected, args=args, no_build=no_build) return open(self.in_dir('src.cpp.o.js')).read() # Sanity check that it works and the dead function is emitted js = test('*1*', ['x']) test('*2*', no_build=True) if self.run_name in ['default', 'asm1', 'asm2g']: assert 'function _unused($' in js # Kill off the dead function, and check a code path using it aborts Settings.DEAD_FUNCTIONS = ['_unused'] test('*2*') test('abort(-1) at', args=['x'], no_build=True) # Kill off a library function, check code aborts Settings.DEAD_FUNCTIONS = ['_printf'] test('abort(-1) at') test('abort(-1) at', args=['x'], no_build=True) def test_pgo(self): if Settings.ASM_JS: return self.skip('PGO does not work in asm mode') def run_all(name, src): print name def test(expected, args=[], no_build=False): self.do_run(src, expected, args=args, no_build=no_build) return open(self.in_dir('src.cpp.o.js')).read() # Sanity check that it works and the dead function is emitted js = test('*9*') assert 'function _unused(' in js # Run with PGO, see that unused is true to its name Settings.PGO = 1 test("*9*\n-s DEAD_FUNCTIONS='[\"_free\",\"_unused\"]'") Settings.PGO = 0 # Kill off the dead function, still works and it is not emitted Settings.DEAD_FUNCTIONS = ['_unused'] js = test('*9*') assert 'function _unused($' not in js # no compiled code assert 'function _unused(' in js # lib-generated stub Settings.DEAD_FUNCTIONS = [] # Run the same code with argc that uses the dead function, see abort test(('dead function: unused'), args=['a', 'b'], no_build=True) # Normal stuff run_all('normal', r''' #include <stdio.h> extern "C" { int used(int x) { if (x == 0) return -1; return used(x/3) + used(x/17) + x%5; } int unused(int x) { if (x == 0) return -1; return unused(x/4) + unused(x/23) + x%7; } } int main(int argc, char **argv) { printf("*%d*\n", argc == 3 ? unused(argv[0][0] + 1024) : used(argc + 1555)); return 0; } ''') # Call by function pointer run_all('function pointers', r''' #include <stdio.h> extern "C" { int used(int x) { if (x == 0) return -1; return used(x/3) + used(x/17) + x%5; } int unused(int x) { if (x == 0) return -1; return unused(x/4) + unused(x/23) + x%7; } } typedef int (*ii)(int); int main(int argc, char **argv) { ii pointers[256]; for (int i = 0; i < 256; i++) { pointers[i] = (i == 3) ? unused : used; } printf("*%d*\n", pointers[argc](argc + 1555)); return 0; } ''') # TODO: test only worked in non-fastcomp def test_asm_pgo(self): return self.skip('non-fastcomp is deprecated and fails in 3.5') src = open(path_from_root('tests', 'hello_libcxx.cpp')).read() output = 'hello, world!' 
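# Build once without PGO to get a baseline (normal.js); its size is compared against the PGO'd build further down.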
self.do_run(src, output) shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('normal.js')) Settings.ASM_JS = 0 Settings.PGO = 1 self.do_run(src, output) Settings.ASM_JS = 1 Settings.PGO = 0 shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('pgo.js')) pgo_output = run_js(self.in_dir('pgo.js')).split('\n')[1] open('pgo_data.rsp', 'w').write(pgo_output) # with response file self.emcc_args += ['@pgo_data.rsp'] self.do_run(src, output) self.emcc_args.pop() shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('pgoed.js')) before = len(open('normal.js').read()) after = len(open('pgoed.js').read()) assert after < 0.90 * before, [before, after] # expect a size reduction # with response in settings element itself open('dead_funcs', 'w').write(pgo_output[pgo_output.find('['):-1]) self.emcc_args += ['-s', 'DEAD_FUNCTIONS=@' + self.in_dir('dead_funcs')] self.do_run(src, output) self.emcc_args.pop() self.emcc_args.pop() shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('pgoed2.js')) assert open('pgoed.js').read() == open('pgoed2.js').read() # with relative response in settings element itself open('dead_funcs', 'w').write(pgo_output[pgo_output.find('['):-1]) self.emcc_args += ['-s', 'DEAD_FUNCTIONS=@dead_funcs'] self.do_run(src, output) self.emcc_args.pop() self.emcc_args.pop() shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('pgoed2.js')) assert open('pgoed.js').read() == open('pgoed2.js').read() def test_exported_response(self): src = r''' #include <stdio.h> #include <stdlib.h> #include <emscripten.h> extern "C" { int other_function() { return 5; } } int main() { int x = EM_ASM_INT_V({ return Module._other_function() }); emscripten_run_script_string(""); // Add a reference to a symbol that exists in src/deps_info.json to uncover issue #2836 in the test suite. printf("waka %d!\n", x); return 0; } ''' open('exps', 'w').write('["_main","_other_function"]') self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=@exps'] self.do_run(src, '''waka 5!''') assert 'other_function' in open('src.cpp.o.js').read() def test_add_function(self): Settings.INVOKE_RUN = 0 Settings.RESERVED_FUNCTION_POINTERS = 1 src = r''' #include <stdio.h> #include <stdlib.h> #include <emscripten.h> int main(int argc, char **argv) { int fp = atoi(argv[1]); printf("fp: %d\n", fp); void (*f)(int) = reinterpret_cast<void (*)(int)>(fp); f(7); EM_ASM_(Module['Runtime']['removeFunction']($0), f); printf("ok\n"); return 0; } ''' open(os.path.join(self.get_dir(), 'post.js'), 'w').write(''' var newFuncPtr = Runtime.addFunction(function(num) { Module.print('Hello ' + num + ' from JS!'); }); Module.callMain([newFuncPtr.toString()]); ''') expected = '''Hello 7 from JS!\nok\n''' self.emcc_args += ['--post-js', 'post.js'] self.do_run(src, expected) if Settings.ASM_JS: Settings.RESERVED_FUNCTION_POINTERS = 0 self.do_run(src, '''Finished up all reserved function pointers. 
Use a higher value for RESERVED_FUNCTION_POINTERS.''') generated = open('src.cpp.o.js').read() assert 'jsCall' not in generated Settings.RESERVED_FUNCTION_POINTERS = 1 Settings.ALIASING_FUNCTION_POINTERS = 1 - Settings.ALIASING_FUNCTION_POINTERS # flip the test self.do_run(src, expected) assert 'asm2' in test_modes if self.run_name == 'asm2': print 'closure' self.banned_js_engines = [NODE_JS] # weird global handling in node self.emcc_args += ['--closure', '1'] self.do_run(src, expected) print 'function pointer emulation' Settings.RESERVED_FUNCTION_POINTERS = 0 Settings.EMULATED_FUNCTION_POINTERS = 1 # with emulation, we don't need to reserve self.do_run(src, expected) def test_getFuncWrapper_sig_alias(self): src = r''' #include <stdio.h> #include <emscripten.h> void func1(int a) { printf("func1\n"); } void func2(int a, int b) { printf("func2\n"); } int main() { EM_ASM_INT({ Runtime.getFuncWrapper($0, 'vi')(0); Runtime.getFuncWrapper($1, 'vii')(0, 0); }, func1, func2); return 0; } ''' self.do_run(src, 'func1\nfunc2\n') def test_emulate_function_pointer_casts(self): Settings.EMULATE_FUNCTION_POINTER_CASTS = 1 src = r''' #include <stdio.h> #include <math.h> typedef double (*ddd)(double x, double unused); typedef int (*iii)(int x, int unused); int main() { volatile ddd d = (ddd)acos; volatile iii i = (iii)acos; printf("|%.3f,%d|\n", d(0.3, 0.6), i(0, 0)); return 0; } ''' self.do_run(src, '|1.266,1|\n') def test_demangle_stacks(self): if Settings.ASM_JS: return self.skip('spidermonkey has stack trace issues') test_path = path_from_root('tests', 'core', 'test_demangle_stacks') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_tracing(self): Building.COMPILER_TEST_OPTS += ['--tracing'] test_path = path_from_root('tests', 'core', 'test_tracing') src, output = (test_path + s for s in ('.in', '.out')) self.do_run_from_file(src, output) def test_embind(self): Building.COMPILER_TEST_OPTS += ['--bind'] src = r''' #include<stdio.h> #include<emscripten/val.h> using namespace emscripten; int main() { val Math = val::global("Math"); // two ways to call Math.abs printf("abs(-10): %d\n", Math.call<int>("abs", -10)); printf("abs(-11): %d\n", Math["abs"](-11).as<int>()); return 0; } ''' self.do_run(src, 'abs(-10): 10\nabs(-11): 11'); def test_embind_2(self): Settings.NO_EXIT_RUNTIME = 1 # we emit some post.js that we need to see Building.COMPILER_TEST_OPTS += ['--bind', '--post-js', 'post.js'] open('post.js', 'w').write(''' Module.print('lerp ' + Module.lerp(100, 200, 66) + '.'); ''') src = r''' #include <stdio.h> #include <emscripten/bind.h> using namespace emscripten; int lerp(int a, int b, int t) { return (100 - t) * a + t * b; } EMSCRIPTEN_BINDINGS(my_module) { function("lerp", &lerp); } int main(int argc, char **argv) { return 0; } ''' self.do_run(src, 'lerp 166'); def test_scriptaclass(self): Settings.EXPORT_BINDINGS = 1 header_filename = os.path.join(self.get_dir(), 'header.h') header = ''' struct ScriptMe { int value; ScriptMe(int val); int getVal(); // XXX Sadly, inlining these will result in LLVM not // producing any code for them (when just building // as a library) void mulVal(int mul); }; ''' h = open(header_filename, 'w') h.write(header) h.close() src = ''' #include "header.h" ScriptMe::ScriptMe(int val) : value(val) { } int ScriptMe::getVal() { return value; } void ScriptMe::mulVal(int mul) { value *= mul; } ''' # Way 1: use demangler and namespacer script_src = ''' var sme = Module._.ScriptMe.__new__(83); // malloc(sizeof(ScriptMe)), 
ScriptMe::ScriptMe(sme, 83) / new ScriptMe(83) (at addr sme) Module._.ScriptMe.mulVal(sme, 2); // ScriptMe::mulVal(sme, 2) sme.mulVal(2) Module.print('*' + Module._.ScriptMe.getVal(sme) + '*'); _free(sme); Module.print('*ok*'); ''' post = ''' def process(filename): Popen([PYTHON, DEMANGLER, filename], stdout=open(filename + '.tmp', 'w')).communicate() Popen([PYTHON, NAMESPACER, filename, filename + '.tmp'], stdout=open(filename + '.tmp2', 'w')).communicate() src = open(filename, 'r').read().replace( '// {{MODULE_ADDITIONS}', 'Module["_"] = ' + open(filename + '.tmp2', 'r').read().replace('var ModuleNames = ', '').rstrip() + ';\n\n' + script_src + '\n\n' + '// {{MODULE_ADDITIONS}' ) open(filename, 'w').write(src) ''' # XXX disable due to possible v8 bug -- self.do_run(src, '*166*\n*ok*', post_build=post) if '-O2' in self.emcc_args and 'ASM_JS=0' not in self.emcc_args: # without asm, closure minifies Math.imul badly self.emcc_args += ['--closure', '1'] # Use closure here, to test we export things right # Way 2: use CppHeaderParser header = ''' #include <stdio.h> class Parent { protected: int value; public: Parent(int val); Parent(Parent *p, Parent *q); // overload constructor int getVal() { return value; }; // inline should work just fine here, unlike Way 1 before void mulVal(int mul); }; class Child1 : public Parent { public: Child1() : Parent(7) { printf("Child1:%d\\n", value); }; Child1(int val) : Parent(val*2) { value -= 1; printf("Child1:%d\\n", value); }; int getValSqr() { return value*value; } int getValSqr(int more) { return value*value*more; } int getValTimes(int times=1) { return value*times; } }; class Child2 : public Parent { public: Child2() : Parent(9) { printf("Child2:%d\\n", value); }; int getValCube() { return value*value*value; } static void printStatic() { printf("*static*\\n"); } virtual void virtualFunc() { printf("*virtualf*\\n"); } virtual void virtualFunc2() { printf("*virtualf2*\\n"); } static void runVirtualFunc(Child2 *self) { self->virtualFunc(); }; private: void doSomethingSecret() { printf("security breached!\\n"); }; // we should not be able to do this }; ''' open(header_filename, 'w').write(header) basename = os.path.join(self.get_dir(), 'bindingtest') output = Popen([PYTHON, BINDINGS_GENERATOR, basename, header_filename], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0] #print output assert 'Traceback' not in output, 'Failure in binding generation: ' + output src = ''' #include "header.h" Parent::Parent(int val) : value(val) { printf("Parent:%d\\n", val); } Parent::Parent(Parent *p, Parent *q) : value(p->value + q->value) { printf("Parent:%d\\n", value); } void Parent::mulVal(int mul) { value *= mul; } #include "bindingtest.cpp" ''' post2 = ''' def process(filename): src = open(filename, 'a') src.write(open('bindingtest.js').read() + '\\n\\n') src.close() ''' def post3(filename): script_src_2 = ''' var sme = new Module.Parent(42); sme.mulVal(2); Module.print('*') Module.print(sme.getVal()); Module.print('c1'); var c1 = new Module.Child1(); Module.print(c1.getVal()); c1.mulVal(2); Module.print(c1.getVal()); Module.print(c1.getValSqr()); Module.print(c1.getValSqr(3)); Module.print(c1.getValTimes()); // default argument should be 1 Module.print(c1.getValTimes(2)); Module.print('c1 v2'); c1 = new Module.Child1(8); // now with a parameter, we should handle the overloading automatically and properly and use constructor #2 Module.print(c1.getVal()); c1.mulVal(2); Module.print(c1.getVal()); Module.print(c1.getValSqr()); Module.print(c1.getValSqr(3)); 
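// Child2: exercise static methods, virtual dispatch (including a JS-side vtable override) and private-method protection.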
Module.print('c2') var c2 = new Module.Child2(); Module.print(c2.getVal()); c2.mulVal(2); Module.print(c2.getVal()); Module.print(c2.getValCube()); var succeeded; try { succeeded = 0; Module.print(c2.doSomethingSecret()); // should fail since private succeeded = 1; } catch(e) {} Module.print(succeeded); try { succeeded = 0; Module.print(c2.getValSqr()); // function from the other class succeeded = 1; } catch(e) {} Module.print(succeeded); try { succeeded = 0; c2.getValCube(); // sanity succeeded = 1; } catch(e) {} Module.print(succeeded); Module.Child2.prototype.printStatic(); // static calls go through the prototype // virtual function c2.virtualFunc(); Module.Child2.prototype.runVirtualFunc(c2); c2.virtualFunc2(); // extend the class from JS var c3 = new Module.Child2; Module.customizeVTable(c3, [{ original: Module.Child2.prototype.virtualFunc, replacement: function() { Module.print('*js virtualf replacement*'); } }, { original: Module.Child2.prototype.virtualFunc2, replacement: function() { Module.print('*js virtualf2 replacement*'); } }]); c3.virtualFunc(); Module.Child2.prototype.runVirtualFunc(c3); c3.virtualFunc2(); c2.virtualFunc(); // original should remain the same Module.Child2.prototype.runVirtualFunc(c2); c2.virtualFunc2(); Module.print('*ok*'); ''' code = open(filename).read() src = open(filename, 'w') src.write('var Module = {};\n') # name Module src.write(code) src.write(script_src_2 + '\n') src.close() Settings.RESERVED_FUNCTION_POINTERS = 20 self.do_run(src, '''* 84 c1 Parent:7 Child1:7 7 14 196 588 14 28 c1 v2 Parent:16 Child1:15 15 30 900 2700 c2 Parent:9 Child2:9 9 18 5832 0 0 1 *static* *virtualf* *virtualf* *virtualf2*''' + (''' Parent:9 Child2:9 *js virtualf replacement* *js virtualf replacement* *js virtualf2 replacement* *virtualf* *virtualf* *virtualf2*''') + ''' *ok* ''', post_build=(post2, post3)) def test_scriptaclass_2(self): Settings.EXPORT_BINDINGS = 1 header_filename = os.path.join(self.get_dir(), 'header.h') header = ''' #include <stdio.h> #include <string.h> class StringUser { char *s; int i; public: StringUser(char *string, int integer) : s(strdup(string)), i(integer) {} void Print(int anotherInteger, char *anotherString) { printf("|%s|%d|%s|%d|\\n", s, i, anotherString, anotherInteger); } void CallOther(StringUser *fr) { fr->Print(i, s); } }; ''' open(header_filename, 'w').write(header) basename = os.path.join(self.get_dir(), 'bindingtest') output = Popen([PYTHON, BINDINGS_GENERATOR, basename, header_filename], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0] #print output assert 'Traceback' not in output, 'Failure in binding generation: ' + output src = ''' #include "header.h" #include "bindingtest.cpp" ''' post = ''' def process(filename): src = open(filename, 'a') src.write(open('bindingtest.js').read() + '\\n\\n') src.write(\'\'\' var user = new Module.StringUser("hello", 43); user.Print(41, "world"); \'\'\') src.close() ''' self.do_run(src, '|hello|43|world|41|', post_build=post) def test_webidl(self): assert 'asm2' in test_modes if self.run_name == 'asm2': self.emcc_args += ['--closure', '1', '-g1'] # extra testing Settings.MODULARIZE = 1 # avoid closure minified names competing with our test code in the global name space def do_test_in_mode(mode): print 'testing mode', mode # Force IDL checks mode os.environ['IDL_CHECKS'] = mode output = Popen([PYTHON, path_from_root('tools', 'webidl_binder.py'), path_from_root('tests', 'webidl', 'test.idl'), 'glue']).communicate()[0] assert os.path.exists('glue.cpp') assert os.path.exists('glue.js') # 
Export things on "TheModule". This matches the typical use pattern of the bound library # being used as Box2D.* or Ammo.*, and we cannot rely on "Module" being always present (closure may remove it). open('export.js', 'w').write(''' // test purposes: remove printErr output, whose order is unpredictable when compared to print Module.printErr = Module['printErr'] = function(){}; ''') self.emcc_args += ['--post-js', 'glue.js', '--post-js', 'export.js'] shutil.copyfile(path_from_root('tests', 'webidl', 'test.h'), self.in_dir('test.h')) shutil.copyfile(path_from_root('tests', 'webidl', 'test.cpp'), self.in_dir('test.cpp')) src = open('test.cpp').read() def post(filename): src = open(filename, 'a') src.write('\n\n') if self.run_name == 'asm2': src.write('var TheModule = Module();\n') else: src.write('var TheModule = Module;\n') src.write('\n\n') src.write(open(path_from_root('tests', 'webidl', 'post.js')).read()) src.write('\n\n') src.close() self.do_run(src, open(path_from_root('tests', 'webidl', "output_%s.txt" % mode)).read(), post_build=(None, post), output_nicerizer=(lambda out, err: out)) do_test_in_mode('ALL') do_test_in_mode('FAST') do_test_in_mode('DEFAULT') ### Tests for tools def test_safe_heap(self): if not Settings.SAFE_HEAP: return self.skip('We need SAFE_HEAP to test SAFE_HEAP') # TODO: Should we remove this test? return self.skip('It is ok to violate the load-store assumption with TA2') if Building.LLVM_OPTS: return self.skip('LLVM can optimize away the intermediate |x|') src = ''' #include<stdio.h> #include<stdlib.h> int main() { int *x = (int*)malloc(sizeof(int)); *x = 20; float *y = (float*)x; printf("%f\\n", *y); printf("*ok*\\n"); return 0; } ''' try: self.do_run(src, '*nothingatall*', assert_returncode=None) except Exception, e: # This test *should* fail, by throwing this exception assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e) Settings.SAFE_HEAP = 1 # Linking multiple files should work too module = ''' #include<stdio.h> #include<stdlib.h> void callFunc() { int *x = (int*)malloc(sizeof(int)); *x = 20; float *y = (float*)x; printf("%f\\n", *y); } ''' module_name = os.path.join(self.get_dir(), 'module.cpp') open(module_name, 'w').write(module) main = ''' #include<stdio.h> #include<stdlib.h> extern void callFunc(); int main() { callFunc(); int *x = (int*)malloc(sizeof(int)); *x = 20; float *y = (float*)x; printf("%f\\n", *y); printf("*ok*\\n"); return 0; } ''' main_name = os.path.join(self.get_dir(), 'main.cpp') open(main_name, 'w').write(main) Building.emcc(module_name, ['-g']) Building.emcc(main_name, ['-g']) all_name = os.path.join(self.get_dir(), 'all.bc') Building.link([module_name + '.o', main_name + '.o'], all_name) try: self.do_ll_run(all_name, '*nothingatall*', assert_returncode=None) except Exception, e: # This test *should* fail, by throwing this exception assert 'Assertion failed: Load-store consistency assumption failure!' 
in str(e), str(e) def test_source_map(self): if self.is_emterpreter(): return self.skip('todo') if NODE_JS not in JS_ENGINES: return self.skip('sourcemapper requires Node to run') if '-g' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g') src = ''' #include <stdio.h> #include <assert.h> __attribute__((noinline)) int foo() { printf("hi"); // line 6 return 1; // line 7 } int main() { printf("%d", foo()); // line 11 return 0; // line 12 } ''' dirname = self.get_dir() src_filename = os.path.join(dirname, 'src.cpp') out_filename = os.path.join(dirname, 'a.out.js') no_maps_filename = os.path.join(dirname, 'no-maps.out.js') with open(src_filename, 'w') as f: f.write(src) assert '-g4' not in Building.COMPILER_TEST_OPTS Building.emcc(src_filename, Settings.serialize() + self.emcc_args + Building.COMPILER_TEST_OPTS, out_filename) # the file name may find its way into the generated code, so make sure we # can do an apples-to-apples comparison by compiling with the same file name shutil.move(out_filename, no_maps_filename) with open(no_maps_filename) as f: no_maps_file = f.read() no_maps_file = re.sub(' *//[@#].*$', '', no_maps_file, flags=re.MULTILINE) Building.COMPILER_TEST_OPTS.append('-g4') def build_and_check(): import json Building.emcc(src_filename, Settings.serialize() + self.emcc_args + Building.COMPILER_TEST_OPTS, out_filename, stderr=PIPE) # after removing the @line and @sourceMappingURL comments, the build # result should be identical to the non-source-mapped debug version. # this is worth checking because the parser AST swaps strings for token # objects when generating source maps, so we want to make sure the # optimizer can deal with both types. map_filename = out_filename + '.map' data = json.load(open(map_filename, 'r')) self.assertPathsIdentical(out_filename, data['file']) assert len(data['sources']) == 1, data['sources'] self.assertPathsIdentical(src_filename, data['sources'][0]) self.assertTextDataIdentical(src, data['sourcesContent'][0]) mappings = json.loads(jsrun.run_js( path_from_root('tools', 'source-maps', 'sourcemap2json.js'), tools.shared.NODE_JS, [map_filename])) seen_lines = set() for m in mappings: self.assertPathsIdentical(src_filename, m['source']) seen_lines.add(m['originalLine']) # ensure that all the 'meaningful' lines in the original code get mapped assert seen_lines.issuperset([6, 7, 11, 12]) build_and_check() assert 'asm2g' in test_modes if self.run_name == 'asm2g': # EMCC_DEBUG=2 causes lots of intermediate files to be written, and so # serves as a stress test for source maps because it needs to correlate # line numbers across all those files. 
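# Save any pre-existing EMCC_DEBUG value so it can be restored afterwards and other tests are unaffected.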
old_emcc_debug = os.environ.get('EMCC_DEBUG', None) os.environ.pop('EMCC_DEBUG', None) try: os.environ['EMCC_DEBUG'] = '2' build_and_check() finally: if old_emcc_debug is not None: os.environ['EMCC_DEBUG'] = old_emcc_debug else: os.environ.pop('EMCC_DEBUG', None) def test_exception_source_map(self): if self.is_emterpreter(): return self.skip('todo') if '-g4' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g4') if NODE_JS not in JS_ENGINES: return self.skip('sourcemapper requires Node to run') src = ''' #include <stdio.h> __attribute__((noinline)) void foo(int i) { if (i < 10) throw i; // line 5 } #include <iostream> #include <string> int main() { std::string x = "ok"; // add libc++ stuff to make this big, test for #2410 int i; scanf("%d", &i); foo(i); std::cout << x << std::endl; return 0; } ''' def post(filename): import json map_filename = filename + '.map' mappings = json.loads(jsrun.run_js( path_from_root('tools', 'source-maps', 'sourcemap2json.js'), tools.shared.NODE_JS, [map_filename])) with open(filename) as f: lines = f.readlines() for m in mappings: if m['originalLine'] == 5 and '__cxa_throw' in lines[m['generatedLine']-1]: # -1 to fix 0-start vs 1-start return assert False, 'Must label throw statements with line numbers' dirname = self.get_dir() self.build(src, dirname, os.path.join(dirname, 'src.cpp'), post_build=(None, post)) def test_emscripten_log(self): if self.is_emterpreter(): self.emcc_args += ['--profiling-funcs'] # without this, stack traces are not useful (we jump emterpret=>emterpret) Building.COMPILER_TEST_OPTS += ['-DEMTERPRETER'] # even so, we get extra emterpret() calls on the stack if Settings.ASM_JS: # XXX Does not work in SpiderMonkey since callstacks cannot be captured when running in asm.js, see https://bugzilla.mozilla.org/show_bug.cgi?id=947996 self.banned_js_engines = [SPIDERMONKEY_ENGINE] if '-g' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g') Building.COMPILER_TEST_OPTS += ['-DRUN_FROM_JS_SHELL'] self.do_run(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read(), "Success!") def test_float_literals(self): self.do_run_from_file(path_from_root('tests', 'test_float_literals.cpp'), path_from_root('tests', 'test_float_literals.out')) def test_exit_status(self): src = r''' #include <stdio.h> #include <stdlib.h> static void cleanup() { printf("cleanup\n"); } int main() { atexit(cleanup); // this atexit should still be called printf("hello, world!\n"); exit(118); // Unusual exit status to make sure it's working! 
} ''' open('post.js', 'w').write(''' Module.addOnExit(function () { Module.print('I see exit status: ' + EXITSTATUS); }); Module.callMain(); ''') self.emcc_args += ['-s', 'INVOKE_RUN=0', '--post-js', 'post.js'] self.do_run(src, 'hello, world!\ncleanup\nI see exit status: 118') def test_noexitruntime(self): src = r''' #include <emscripten.h> #include <stdio.h> static int testPre = TEST_PRE; struct Global { Global() { printf("in Global()\n"); if (testPre) { EM_ASM(Module['noExitRuntime'] = true;); } } ~Global() { printf("ERROR: in ~Global()\n"); } } global; int main() { if (!testPre) { EM_ASM(Module['noExitRuntime'] = true;); } printf("in main()\n"); } ''' self.do_run(src.replace('TEST_PRE', '0'), 'in Global()\nin main()') self.do_run(src.replace('TEST_PRE', '1'), 'in Global()\nin main()') def test_minmax(self): self.do_run(open(path_from_root('tests', 'test_minmax.c')).read(), 'NAN != NAN\nSuccess!') def test_locale(self): self.do_run_from_file(path_from_root('tests', 'test_locale.c'), path_from_root('tests', 'test_locale.out')) def test_sixtyfour_bit_return_value(self): # This test checks that the most significant 32 bits of a 64 bit long are correctly made available # to native JavaScript applications that wish to interact with compiled code returning 64 bit longs. # The MS 32 bits should be available in Runtime.getTempRet0() even when compiled with -O2 --closure 1 # Compile test.c and wrap it in a native JavaScript binding so we can call our compiled function from JS. Popen([PYTHON, EMCC, path_from_root('tests', 'return64bit', 'test.c'), '--pre-js', path_from_root('tests', 'return64bit', 'testbindstart.js'), '--pre-js', path_from_root('tests', 'return64bit', 'testbind.js'), '--post-js', path_from_root('tests', 'return64bit', 'testbindend.js'), '-s', 'EXPORTED_FUNCTIONS=["_test"]', '-o', 'test.js', '-O2', '--closure', '1'], stdout=PIPE, stderr=PIPE).communicate() # Simple test program to load the test.js binding library and call the binding to the # C function returning the 64 bit long. open(os.path.join(self.get_dir(), 'testrun.js'), 'w').write(''' var test = require("./test.js"); test.runtest(); ''') # Run the test and confirm the output is as expected. 
if NODE_JS in JS_ENGINES: out = run_js('testrun.js', engine=NODE_JS, full_output=True) assert "low = 5678" in out assert "high = 1234" in out def test_async(self): self.banned_js_engines = [SPIDERMONKEY_ENGINE, V8_ENGINE] # needs setTimeout which only node has src = r''' #include <stdio.h> #include <emscripten.h> void f(void *p) { *(int*)p = 99; printf("!"); } int main() { int i = 0; printf("Hello"); emscripten_async_call(f, &i, 1); printf("World"); emscripten_%s(100); printf("%%d\n", i); } ''' % ('sleep_with_yield' if self.is_emterpreter() else 'sleep') if not self.is_emterpreter(): Settings.ASYNCIFY = 1 else: Settings.EMTERPRETIFY_ASYNC = 1 self.do_run(src, 'HelloWorld!99'); if self.is_emterpreter(): print 'check bad ccall use' src = r''' #include <stdio.h> #include <emscripten.h> int main() { printf("Hello"); emscripten_sleep(100); printf("World\n"); } ''' Settings.ASSERTIONS = 1 Settings.INVOKE_RUN = 0 open('post.js', 'w').write(''' try { Module['ccall']('main', 'number', ['number', 'string'], [2, 'waka']); var never = true; } catch(e) { Module.print(e); assert(!never); } ''') self.emcc_args += ['--post-js', 'post.js'] self.do_run(src, 'cannot start async op with normal JS'); print 'check reasonable ccall use' src = r''' #include <stdio.h> #include <emscripten.h> int main() { printf("Hello"); emscripten_sleep(100); printf("World\n"); } ''' open('post.js', 'w').write(''' Module['ccall']('main', null, ['number', 'string'], [2, 'waka'], { async: true }); ''') self.do_run(src, 'HelloWorld'); def test_async_returnvalue(self): if not self.is_emterpreter(): return self.skip('emterpreter-only test') Settings.EMTERPRETIFY_ASYNC = 1 self.banned_js_engines = [SPIDERMONKEY_ENGINE, V8_ENGINE] # needs setTimeout which only node has open('lib.js', 'w').write(r''' mergeInto(LibraryManager.library, { sleep_with_return__deps: ['$EmterpreterAsync'], sleep_with_return: function(ms) { return EmterpreterAsync.handle(function(resume) { var startTime = Date.now(); setTimeout(function() { if (ABORT) return; // do this manually; we can't call into Browser.safeSetTimeout, because that is paused/resumed! 
resume(function() { return Date.now() - startTime; }); }, ms); }); } }); ''') src = r''' #include <stdio.h> #include <assert.h> #include <emscripten.h> extern "C" { extern int sleep_with_return(int ms); } int main() { int ms = sleep_with_return(1000); assert(ms >= 900); printf("napped for %d ms\n", ms); } ''' self.emcc_args += ['--js-library', 'lib.js'] self.do_run(src, 'napped'); def test_async_exit(self): if not self.is_emterpreter(): return self.skip('emterpreter-only test') Settings.EMTERPRETIFY_ASYNC = 1 self.banned_js_engines = [SPIDERMONKEY_ENGINE, V8_ENGINE] # needs setTimeout which only node has self.do_run(r''' #include <stdio.h> #include <stdlib.h> #include <emscripten.h> void f() { printf("f\n"); emscripten_sleep(1); printf("hello\n"); static int i = 0; i++; if(i == 5) { printf("exit\n"); exit(0); printf("world\n"); i = 0; } } int main() { while(1) { f(); } return 0; } ''', 'f\nhello\nf\nhello\nf\nhello\nf\nhello\nf\nhello\nexit\n') def test_coroutine(self): src = r''' #include <stdio.h> #include <emscripten.h> void fib(void * arg) { int * p = (int*)arg; int cur = 1; int next = 1; for(int i = 0; i < 9; ++i) { *p = cur; emscripten_yield(); int next2 = cur + next; cur = next; next = next2; } } void f(void * arg) { int * p = (int*)arg; *p = 0; emscripten_yield(); fib(arg); // emscripten_yield in fib() can `pass through` f() back to main(), and then we can assume inside fib() } void g(void * arg) { int * p = (int*)arg; for(int i = 0; i < 10; ++i) { *p = 100+i; emscripten_yield(); } } int main(int argc, char **argv) { int i; emscripten_coroutine co = emscripten_coroutine_create(f, (void*)&i, 0); emscripten_coroutine co2 = emscripten_coroutine_create(g, (void*)&i, 0); printf("*"); while(emscripten_coroutine_next(co)) { printf("%d-", i); emscripten_coroutine_next(co2); printf("%d-", i); } printf("*"); return 0; } ''' Settings.ASYNCIFY = 1; self.do_run(src, '*0-100-1-101-1-102-2-103-3-104-5-105-8-106-13-107-21-108-34-109-*'); def test_cxx_self_assign(self): # See https://github.com/kripken/emscripten/pull/2688 and http://llvm.org/bugs/show_bug.cgi?id=18735 open('src.cpp', 'w').write(r''' #include <map> #include <stdio.h> int main() { std::map<int, int> m; m[0] = 1; m = m; // size should still be one after self assignment if (m.size() == 1) { printf("ok.\n"); } } ''') Popen([PYTHON, EMCC, 'src.cpp']).communicate() self.assertContained('ok.', run_js('a.out.js', args=['C'])) def test_memprof_requirements(self): # This test checks for the global variables required to run the memory # profiler. It would fail if these variables were made no longer global # or if their identifiers were changed. 
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(''' extern "C" { void check_memprof_requirements(); } int main() { check_memprof_requirements(); return 0; } ''') open(os.path.join(self.get_dir(), 'lib.js'), 'w').write(''' mergeInto(LibraryManager.library, { check_memprof_requirements: function() { if (typeof TOTAL_MEMORY === 'number' && typeof STATIC_BASE === 'number' && typeof STATICTOP === 'number' && typeof STACK_BASE === 'number' && typeof STACK_MAX === 'number' && typeof STACKTOP === 'number' && typeof DYNAMIC_BASE === 'number' && typeof DYNAMICTOP === 'number') { Module.print('able to run memprof'); } else { Module.print('missing the required variables to run memprof'); } } }); ''') self.emcc_args += ['--js-library', os.path.join(self.get_dir(), 'lib.js')] self.do_run(open(os.path.join(self.get_dir(), 'main.cpp'), 'r').read(), 'able to run memprof') # Generate tests for everything def make_run(fullname, name=-1, compiler=-1, embetter=0, quantum_size=0, typed_arrays=0, emcc_args=None, env=None): if env is None: env = {} TT = type(fullname, (T,), dict(run_name = fullname, env = env)) def tearDown(self): try: super(TT, self).tearDown() finally: for k, v in self.env.iteritems(): del os.environ[k] # clear global changes to Building Building.COMPILER_TEST_OPTS = [] Building.COMPILER = CLANG Building.LLVM_OPTS = 0 TT.tearDown = tearDown def setUp(self): super(TT, self).setUp() for k, v in self.env.iteritems(): assert k not in os.environ, k + ' should not be in environment' os.environ[k] = v global checked_sanity if not checked_sanity: print '(checking sanity from test runner)' # do this after we set env stuff check_sanity(force=True) checked_sanity = True Building.COMPILER_TEST_OPTS = ['-g'] os.chdir(self.get_dir()) # Ensure the directory exists and go there Building.COMPILER = compiler assert emcc_args is not None self.emcc_args = emcc_args[:] Settings.load(self.emcc_args) Building.LLVM_OPTS = 0 if '-O2' in self.emcc_args or '-O3' in self.emcc_args: Building.COMPILER_TEST_OPTS = [] # remove -g in -O2 tests, for more coverage #Building.COMPILER_TEST_OPTS += self.emcc_args for arg in self.emcc_args: if arg.startswith('-O'): Building.COMPILER_TEST_OPTS.append(arg) # so bitcode is optimized too, this is for cpp to ll else: try: key, value = arg.split('=') Settings[key] = value # forward -s K=V except: pass return TT.setUp = setUp return TT # Main test modes default = make_run("default", compiler=CLANG, emcc_args=["-s", "ASM_JS=2"]) asm1 = make_run("asm1", compiler=CLANG, emcc_args=["-O1"]) asm2 = make_run("asm2", compiler=CLANG, emcc_args=["-O2"]) asm3 = make_run("asm3", compiler=CLANG, emcc_args=["-O3"]) asm2f = make_run("asm2f", compiler=CLANG, emcc_args=["-Oz", "-s", "PRECISE_F32=1", "-s", "ALLOW_MEMORY_GROWTH=1"]) asm2g = make_run("asm2g", compiler=CLANG, emcc_args=["-O2", "-g", "-s", "ASSERTIONS=1", "-s", "SAFE_HEAP=1"]) asm1i = make_run("asm1i", compiler=CLANG, emcc_args=["-O1", '-s', 'EMTERPRETIFY=1']) asm3i = make_run("asm3i", compiler=CLANG, emcc_args=["-O3", '-s', 'EMTERPRETIFY=1']) asm2m = make_run("asm2m", compiler=CLANG, emcc_args=["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1"]) # Legacy test modes - asm2nn = make_run("asm2nn", compiler=CLANG, emcc_args=["-O2"], env={"EMCC_NATIVE_OPTIMIZER": "0"}) del T # T is just a shape for the specific subclasses, we don't test it itself
slightperturbation/Cobalt
ext/emsdk_portable/emscripten/tag-1.34.1/tests/test_core.py
Python
apache-2.0
251267
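# ---------------------------------------------------------------------------
# Note on the source-map checks in the test file above: test_source_map and
# test_exception_source_map shell out to tools/source-maps/sourcemap2json.js
# to turn the base64-VLQ 'mappings' field of the emitted .map file into plain
# JSON before asserting which original source lines are represented. The
# sketch below is a minimal pure-Python illustration of that decoding step,
# not part of the emscripten test suite; the map filename ('a.out.js.map'),
# the helper names, and the 1-based line numbering of the result are
# assumptions made for the example.
import json

_B64 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
_B64_INDEX = dict((c, i) for i, c in enumerate(_B64))

def decode_vlq(segment):
  """Decode one comma-separated VLQ segment into a list of signed integers."""
  values = []
  value = shift = 0
  for c in segment:
    digit = _B64_INDEX[c]
    value += (digit & 31) << shift
    if digit & 32:   # continuation bit set: another 5-bit group follows
      shift += 5
    else:            # last group: bit 0 of the assembled value is the sign bit
      values.append(-(value >> 1) if value & 1 else value >> 1)
      value = shift = 0
  return values

def original_lines(map_filename):
  """Return the set of 1-based original source lines referenced by the map."""
  data = json.load(open(map_filename))
  seen = set()
  src = line = col = 0                  # these fields are delta-encoded across the whole map
  for generated_line in data['mappings'].split(';'):
    gen_col = 0                         # the generated column resets at every ';'
    for segment in generated_line.split(','):
      if not segment:
        continue
      fields = decode_vlq(segment)
      gen_col += fields[0]
      if len(fields) >= 4:
        src += fields[1]
        line += fields[2]
        col += fields[3]
        seen.add(line + 1)              # the mappings store 0-based line numbers
  return seen

if __name__ == '__main__':
  # For the src.cpp used in test_source_map this should include 6, 7, 11 and 12.
  print sorted(original_lines('a.out.js.map'))
# ---------------------------------------------------------------------------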
# An OLE file format parser import os import struct import logging import datetime PIDSI = {'PIDSI_CODEPAGE':0x01, 'PIDSI_TITLE':0x02, 'PIDSI_SUBJECT':0x03, 'PIDSI_AUTHOR':0x04, 'PIDSI_KEYWORDS':0x05, 'PIDSI_COMMENTS':0x06, 'PIDSI_TEMPLATE':0x07, 'PIDSI_LASTAUTHOR':0x08, 'PIDSI_REVNUMBER':0x09, 'PIDSI_EDITTIME':0x0A, 'PIDSI_LASTPRINTED':0x0B, 'PIDSI_CREATE_DTM':0x0C, 'PIDSI_LASTSAVE_DTM':0x0D, 'PIDSI_PAGECOUNT':0x0E, 'PIDSI_WORDCOUNT':0x0F, 'PIDSI_CHARCOUNT':0x10, 'PIDSI_APPNAME':0x12, 'PIDSI_DOC_SECURITY':0x13} PIDDSI = {'GKPIDDSI_CODEPAGE':0x01, 'GKPIDDSI_CATEGORY':0x02, 'GKPIDDSI_PRESFORMAT':0x03, 'GKPIDDSI_BYTECOUNT':0x04, 'GKPIDDSI_LINECOUNT':0x05, 'GKPIDDSI_PARACOUNT':0x06, 'GKPIDDSI_SLIDECOUNT':0x07, 'GKPIDDSI_NOTECOUNT':0x08, 'GKPIDDSI_HIDDENCOUNT':0x09, 'GKPIDDSI_MMCLIPCOUNT':0x0A, 'GKPIDDSI_SCALE':0x0B, 'GKPIDDSI_HEADINGPAIR':0x0C, 'GKPIDDSI_DOCPARTS':0x0D, 'GKPIDDSI_MANAGER':0x0E, 'GKPIDDSI_COMPANY':0x0F, 'GKPIDDSI_LINKSDIRTY':0x10, 'GKPIDDSI_CCHWITHSPACES':0x11, 'GKPIDDSI_SHAREDDOC':0x13, 'GKPIDDSI_LINKBASE':0x14, 'GKPIDDSI_HLINKS':0x15, 'GKPIDDSI_HYPERLINKSCHANGED':0x16, 'GKPIDDSI_VERSION':0x17, 'GKPIDDSI_DIGSIG':0x18, 'GKPIDDSI_CONTENTTYPE':0x1A, 'GKPIDDSI_CONTENTSTATUS':0x1B, 'GKPIDDSI_LANGUAGE':0x1C, 'GKPIDDSI_DOCVERSION':0x1D} PropertyType= {'VT_EMPTY':0x00, 'VT_NULL':0x01, 'VT_I2':0x02, 'VT_I4':0x03, 'VT_R4':0x04, 'VT_R8':0x05, 'VT_CY':0x06, 'VT_DATE': 0x07, 'VT_BSTR':0x08, 'VT_ERROR':0x0A, 'VT_BOOL':0x0B, 'VT_VARIANT':0x0C, 'VT_DECIMAL':0x0E, 'VT_I1':0x10, 'VT_UI1':0x11, 'VT_UI2':0x12, 'VT_UI4':0x13, 'VT_I8':0x14, 'VT_UI8':0x15, 'VT_INT':0x16, 'VT_UINT':0x17, 'VT_LPSTR':0x1E, 'VT_LPWSTR':0x1F, 'VT_FILETIME':0x40, 'VT_BLOB':0x41, 'VT_STREAM':0x42, 'VT_STORAGE':0x43, 'VT_STREAMED_Object':0x44, 'VT_STORED_Object':0x45, 'VT_BLOB_Object':0x46, 'VT_CF':0x47, 'VT_CLSID':0x48, 'VT_VERSIONED_STREAM':0x49, 'VT_VECTOR':0x1000, 'VT_ARRAY':0x2000} def init_logging(debug): ole_logger = logging.getLogger('ole.logger') ch = logging.StreamHandler() formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') if debug: ole_logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) fh = logging.FileHandler('debug.log') fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) ole_logger.addHandler(fh) else: ole_logger.setLevel(logging.ERROR) ch.setLevel(logging.ERROR) ch.setFormatter(formatter) ole_logger.addHandler(ch) if debug: ole_logger.debug('In debug mode.') class OLEBase: ole_logger = logging.getLogger('ole.logger') def __init__(self): pass def _raise_exception(self, error): #self.ole_logger.error(error) self.ole_logger.warning(error) raise Exception(error) def _filetime_to_datetime(self, microseconds): seconds, microseconds = divmod(microseconds/10, 1000000) days, seconds = divmod(seconds, 86400) date_time = datetime.datetime(1601, 1, 1, 0, 0, 0) + datetime.timedelta(days, seconds, microseconds) return str(date_time) class OLEHeader(OLEBase): Signature = '' CLSID = '' MinorVersion = 0 MajorVersion = 0 ByteOrder = 0 SectorShift = 0 MiniSectorShift = 0 Reserved = '' NumberOfDirectorySectors = 0 NumberOfFATSectors = 0 FirstDirecotrySector = 0 TransactionSignatureNumber = 0 MiniStreamCutoffSize = 0 FirstMiniFATSector = 0 NumberOfMiniFATSectors = 0 FirstDIFATSector = 0 NumberOfDIFATSectors = 0 DIFAT = list() def __init__(self, data): self.Signature = '' self.CLSID = '' self.MinorVersion = 0 self.MajorVersion = 0 self.ByteOrder = 0 self.SectorShift = 0 self.MiniSectorShift = 0 self.Reserved = '' self.NumberOfDirectorySectors = 0 self.NumberOfFATSectors = 0 self.FirstDirecotrySector 
= 0 self.TransactionSignatureNumber = 0 self.MiniStreamCutoffSize = 0 self.FirstMiniFATSector = 0 self.NumberOfMiniFATSectors = 0 self.FirstDIFATSector = 0 self.NumberOfDIFATSectors = 0 self.DIFAT = list() self.Signature = data[0x00:0x08] self.ole_logger.debug('OLEHeader.Signature: ' + self.Signature.encode('hex').upper()) if self.Signature != '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1': self._raise_exception('OLEHeader.Signature verify failed.') self.CLSID = data[0x08:0x18] self.ole_logger.debug('OLEHeader.CLSID: ' + self.CLSID.encode('hex').upper()) if self.CLSID != '\x00' * 16: self.ole_logger.warning('OLEHeader.CLSID is not null.') self.MinorVersion = struct.unpack('<H', data[0x18:0x1A])[0] self.ole_logger.debug('OLEHeader.MinorVersion: ' + str(hex(self.MinorVersion))) self.MajorVersion = struct.unpack('<H', data[0x1A:0x1C])[0] self.ole_logger.debug('OLEHeader.MajorVersion: ' + str(hex(self.MajorVersion))) if self.MajorVersion != 0x03 and self.MajorVersion != 0x04: self._raise_exception('OLEHeader.MajorVersion has an abnormal value.') self.ByteOrder = struct.unpack('<H', data[0x1C:0x1E])[0] if self.ByteOrder == 0xFFFE: self.ole_logger.debug('OLEHeader.ByteOrder: ' + str(hex(self.ByteOrder)) + ' (little-endian)') else: self.ole_logger.debug('OLEHeader.ByteOrder: ' + str(hex(self.ByteOrder))) self._raise_exception('OLEHeader.ByteOrder has an abnormal value.') self.SectorShift = struct.unpack('<H', data[0x1E:0x20])[0] if self.SectorShift == 0x09: self.ole_logger.debug('OLEHeader.SectorShift: ' + str(hex(self.SectorShift)) + ' (512 bytes)') elif self.SectorShift == 0x0C: self.ole_logger.debug('OLEHeader.SectorShift: ' + str(hex(self.SectorShift)) + ' (4096 bytes)') else: self.ole_logger.debug('OLEHeader.SectorShift: ' + str(hex(self.SectorShift))) self._raise_exception('OLEHeader.SectorShift has an abnormal value.') self.MiniSectorShift = struct.unpack('<H', data[0x20:0x22])[0] if self.MiniSectorShift == 0x06: self.ole_logger.debug('OLEHeader.MiniSectorShift: ' + str(hex(self.MiniSectorShift)) + ' (64 bytes)') else: self.ole_logger.debug('OLEHeader.MiniSectorShift: ' + str(hex(self.MiniSectorShift))) self._raise_exception('OLEHeader.MiniSectorShift has an abnormal value.') self.Reserved = data[0x22:0x28] self.ole_logger.debug('OLEHeader.Reserved: ' + self.Reserved.encode('hex').upper()) if self.Reserved != '\x00' * 6: self.ole_logger.warning('OLEHeader.Reserved is not all zeros.') self.NumberOfDirectorySectors = struct.unpack('<I', data[0x28:0x2C])[0] self.ole_logger.debug('OLEHeader.NumberOfDirectorySectors: ' + str(hex(self.NumberOfDirectorySectors))) if self.NumberOfDirectorySectors != 0x0 and self.MajorVersion != 0x04: self._raise_exception('OLEHeader.NumberOfDirectorySectors has an abnormal value.') self.NumberOfFATSectors = struct.unpack('<I', data[0x2C:0x30])[0] self.ole_logger.debug('OLEHeader.NumberOfFATSectors: ' + str(hex(self.NumberOfFATSectors))) self.FirstDirecotrySector = struct.unpack('<I', data[0x30:0x34])[0] self.ole_logger.debug('OLEHeader.FirstDirecotrySector: ' + str(hex(self.FirstDirecotrySector))) if self.FirstDirecotrySector == 0: self._raise_exception('OLEHeader.FirstDirecotrySector is zero.') self.TransactionSignatureNumber = struct.unpack('<I', data[0x34:0x38])[0] self.ole_logger.debug('OLEHeader.TransactionSignatureNumber: ' + str(hex(self.TransactionSignatureNumber))) self.MiniStreamCutoffSize = struct.unpack('<I', data[0x38:0x3C])[0] self.ole_logger.debug('OLEHeader.MiniStreamCutoffSize: ' + str(hex(self.MiniStreamCutoffSize))) if self.MiniStreamCutoffSize != 0x1000: 
self._raise_exception('OLEHeader.MiniStreamCutoffSize has an abnormal value.') self.FirstMiniFATSector = struct.unpack('<I', data[0x3C:0x40])[0] self.ole_logger.debug('OLEHeader.FirstMiniFATSector: ' + str(hex(self.FirstMiniFATSector))) self.NumberOfMiniFATSectors = struct.unpack('<I', data[0x40:0x44])[0] self.ole_logger.debug('OLEHeader.NumberOfMiniFATSectors: ' + str(hex(self.NumberOfMiniFATSectors))) if self.NumberOfMiniFATSectors > 0 and self.FirstMiniFATSector == 0xFFFFFFFE: self._raise_exception('OLEHeader.NumberOfMiniFATSectors or OLEHeader.FirstMiniFATSector has an abnormal value.') self.FirstDIFATSector = struct.unpack('<I', data[0x44:0x48])[0] self.ole_logger.debug('OLEHeader.FirstDIFATSector: ' + str(hex(self.FirstDIFATSector))) self.NumberOfDIFATSectors = struct.unpack('<I', data[0x48:0x4C])[0] self.ole_logger.debug('OLEHeader.NumberOfDIFATSectors: ' + str(hex(self.NumberOfDIFATSectors))) if self.NumberOfDIFATSectors > 0 and self.FirstDIFATSector == 0xFFFFFFFE: self._raise_exception('OLEHeader.NumberOfDIFATSectors or OLEHeader.FirstDIFATSector has an abnormal value.') for i in range(0, 109): difat = struct.unpack('<I', data[0x4C+i*4:0x4C+i*4+4])[0] if difat == 0xFFFFFFFF: break self.ole_logger.debug('OLEHeader.DIFAT[' + str(i) + '] :' + str(hex(difat))) self.DIFAT.append(difat) i += 1 for j in range(i, 109): difat = struct.unpack('<I', data[0x4C+j*4:0x4C+j*4+4])[0] if difat != 0xFFFFFFFF: self._raise_exception('OLEHeader.DIFAT[' + str(j) + '] has an abnormal value.') class Directory(OLEBase): Name = '' NameLength = 0 ObjectType = 0 ColorFlag = 0 LeftSiblingID = 0 RightSiblingID = 0 ChildID = 0 CLSID = '' StateBits = 0 CreationTime = '' ModifiedTime = '' StartingSector = 0 StreamSize = 0 def __init__(self, data): self.Name = '' self.NameLength = 0 self.ObjectType = 0 self.ColorFlag = 0 self.LeftSiblingID = 0 self.RightSiblingID = 0 self.ChildID = 0 self.CLSID = '' self.StateBits = 0 self.CreationTime = '' self.ModifiedTime = '' self.StartingSector = 0 self.StreamSize = 0 self.Name = data[0:0x40].decode('utf-16').strip('\x00') self.ole_logger.debug('Dir.Name: ' + self.Name) self.NameLength = struct.unpack('<H', data[0x40:0x42])[0] self.ole_logger.debug('Dir.NameLength: ' + str(self.NameLength)) if self.NameLength != len(self.Name)*2+2: self._raise_exception('DirectoryEntry.NameLength has a wrong value.') self.ObjectType = ord(data[0x42]) if self.ObjectType == 0x00: self.ole_logger.debug('Dir.ObjectType: ' + str(self.ObjectType) + ' (unallocated)') elif self.ObjectType == 0x01: self.ole_logger.debug('Dir.ObjectType: ' + str(self.ObjectType) + ' (storage object)') elif self.ObjectType == 0x02: self.ole_logger.debug('Dir.ObjectType: ' + str(self.ObjectType) + ' (stream object)') elif self.ObjectType == 0x05: self.ole_logger.debug('Dir.ObjectType: ' + str(self.ObjectType) + ' (root storage object)') else: self._raise_exception('DirectoryEntry.ObjectType has an abnormal value.') self.ColorFlag = ord(data[0x43]) if self.ColorFlag == 0x00: self.ole_logger.debug('Dir.ColorFlag: ' + str(self.ColorFlag) + ' (red)') elif self.ColorFlag == 0x01: self.ole_logger.debug('Dir.ColorFlag: ' + str(self.ColorFlag) + ' (black)') else: self._raise_exception('DirectoryEntry.ColorFlag has an abnormal value.') self.LeftSiblingID = struct.unpack('<I', data[0x44:0x48])[0] if self.LeftSiblingID >= 0 and self.LeftSiblingID <= 0xFFFFFFF9: self.ole_logger.debug('Dir.LeftSiblingID: ' + str(hex(self.LeftSiblingID)) + ' (REGSID)') elif self.LeftSiblingID == 0xFFFFFFFF: self.ole_logger.debug('Dir.LeftSiblingID: ' 
+ str(hex(self.LeftSiblingID)) + ' (NOSTREAM)') else: self._raise_exception('DirectoryEntry.LeftSiblingID has an abnormal value.') self.RightSiblingID = struct.unpack('<I', data[0x48:0x4C])[0] if self.RightSiblingID >= 0 and self.RightSiblingID <= 0xFFFFFFF9: self.ole_logger.debug('Dir.RightSiblingID: ' + str(hex(self.RightSiblingID)) + ' (REGSID)') elif self.RightSiblingID == 0xFFFFFFFF: self.ole_logger.debug('Dir.RightSiblingID: ' + str(hex(self.RightSiblingID)) + ' (NOSTREAM)') else: self._raise_exception('DirectoryEntry.RightSiblingID has an abnormal value.') self.ChildID = struct.unpack('<I', data[0x4C:0x50])[0] if self.ChildID >= 0 and self.ChildID <= 0xFFFFFFF9: self.ole_logger.debug('Dir.ChildID: ' + str(hex(self.ChildID)) + ' (REGSID)') elif self.ChildID == 0xFFFFFFFF: self.ole_logger.debug('Dir.ChildID: ' + str(hex(self.ChildID)) + ' (NOSTREAM)') else: self._raise_exception('DirectoryEntry.ChildID has an abnormal value.') self.CLSID = data[0x50:0x60] self.ole_logger.debug('Dir.CLSID: ' + self.CLSID.encode('hex')) self.StateBits = struct.unpack('<I', data[0x60:0x64])[0] self.ole_logger.debug('Dir.StateBits: ' + str(hex(self.StateBits))) self.CreationTime = struct.unpack('<Q', data[0x64:0x6C])[0] self.ole_logger.debug('Dir.CreationTime: ' + self._filetime_to_datetime(self.CreationTime)) self.ModifiedTime = struct.unpack('<Q', data[0x6C:0x74])[0] self.ole_logger.debug('Dir.ModifiedTime: ' + self._filetime_to_datetime(self.ModifiedTime)) self.StartingSector = struct.unpack('<I', data[0x74:0x78])[0] self.ole_logger.debug('Dir.StartingSector: ' + str(hex(self.StartingSector))) self.StreamSize = struct.unpack('<Q', data[0x78:0x80])[0] self.ole_logger.debug('Dir.StreamSize: ' + str(hex(self.StreamSize))) class PropertyIdentifierAndOffset(OLEBase): PropertyIdentifier = 0 Offset = 0 def __init__(self, data): self.PropertyIdentifier = 0 self.Offset = 0 self.PropertyIdentifier = struct.unpack('<I', data[0:4])[0] self.ole_logger.debug('PropertyIdentifierAndOffset.PropertyIdentifier: ' + str(hex(self.PropertyIdentifier))) self.Offset = struct.unpack('<I', data[4:8])[0] self.ole_logger.debug('PropertyIdentifierAndOffset.Offset: ' + str(hex(self.Offset))) class DocSummaryInfoPropertySet(OLEBase): Size = 0 NumProperties = 0 PropertyIdentifierAndOffset = list() Property = list() def __init__(self, data): self.Size = 0 self.NumProperties = 0 self.PropertyIdentifierAndOffset = list() self.Property = list() self.Size = struct.unpack('<I', data[0x00:0x04])[0] self.ole_logger.debug('DocSummaryInfoPropertySet.Size: ' + str(hex(self.Size))) self.NumProperties = struct.unpack('<I', data[0x04:0x08])[0] self.ole_logger.debug('DocSummaryInfoPropertySet.NumProperties: ' + str(hex(self.NumProperties))) for i in range(0, self.NumProperties): piao = PropertyIdentifierAndOffset(data[0x08+i*8:0x08+i*8+8]) self.PropertyIdentifierAndOffset.append(piao) for i in range(0, self.NumProperties): if (i+1) < self.NumProperties: if self.PropertyIdentifierAndOffset[i].Offset < self.PropertyIdentifierAndOffset[i+1].Offset: property = data[self.PropertyIdentifierAndOffset[i].Offset:self.PropertyIdentifierAndOffset[i+1].Offset] else: self.ole_logger.warning('DocSummaryInfoPropertySet.PropertyIdentifierAndOffset.Offset is not in increasing order.') property = data[self.PropertyIdentifierAndOffset[i].Offset:self.Size] else: property = data[self.PropertyIdentifierAndOffset[i].Offset:self.Size] self.Property.append(property) for i in range(0, self.NumProperties): if self.PropertyIdentifierAndOffset[i].PropertyIdentifier ==
PIDDSI['GKPIDDSI_CODEPAGE']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_CODEPAGE.type: ' + str(hex(type))) if type != PropertyType['VT_I2']: self._raise_exception('Property.GKPIDDSI_CODEPAGE has an abnormal value.') codepage = struct.unpack('<H', self.Property[i][0x04:0x06])[0] self.ole_logger.debug('Property.GKPIDDSI_CODEPAGE: ' + str(hex(codepage))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_COMPANY']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_COMPANY.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.GKPIDDSI_COMPANY.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.GKPIDDSI_COMPANY.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: company = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: company = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.GKPIDDSI_COMPANY has an abnormal value.') self.ole_logger.debug('Property.GKPIDDSI_COMPANY: ' + company) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_LINECOUNT']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_LINECOUNT.type: ' + str(hex(type))) if type != PropertyType['VT_I4']: self._raise_exception('Property.GKPIDDSI_LINECOUNT has an abnormal value.') linecount = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.GKPIDDSI_LINECOUNT: ' + str(hex(linecount))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_PARACOUNT']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_PARACOUNT.type: ' + str(hex(type))) if type != PropertyType['VT_I4']: self._raise_exception('Property.GKPIDDSI_PARACOUNT has an abnormal value.') pagecount = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.GKPIDDSI_PARACOUNT: ' + str(hex(pagecount))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_CCHWITHSPACES']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_CCHWITHSPACES.type: ' + str(hex(type))) if type != PropertyType['VT_I4']: self._raise_exception('Property.GKPIDDSI_CCHWITHSPACES has an abnormal value.') pagecount = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.GKPIDDSI_CCHWITHSPACES: ' + str(hex(pagecount))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_VERSION']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_VERSION.type: ' + str(hex(type))) if type != PropertyType['VT_I4']: self._raise_exception('Property.GKPIDDSI_VERSION has an abnormal value.') minorversion = struct.unpack('<H', self.Property[i][0x04:0x06])[0] majorverson= struct.unpack('<H', self.Property[i][0x06:0x08])[0] if majorverson == 0: self._raise_exception('Property.GKPIDDSI_VERSION.MajorVersion has an abnormal value.') self.ole_logger.debug('Property.GKPIDDSI_VERSION.MajorVersion: ' + str(hex(majorverson))) self.ole_logger.debug('Property.GKPIDDSI_VERSION.MinorVersion: ' + str(hex(minorversion))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_DOCPARTS']: type = 
struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.type: ' + str(hex(type))) celements = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.cElements: ' + str(hex(celements))) if type == (PropertyType['VT_VECTOR'] | PropertyType['VT_LPSTR']): offset = 0 for j in range(0, celements): cch = struct.unpack('<I', self.Property[i][0x08+offset:0x0C+offset])[0] self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + '].cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + '].cch has an abnormal value.') value = self.Property[i][0x0C+offset:0x0C+offset+cch] self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + ']: ' + value.encode('hex')) offset = offset + 4 + cch elif type == (PropertyType['VT_VECTOR'] | PropertyType['VT_LPWSTR']): offset = 0 for j in range(0, celements): cch = struct.unpack('<I', self.Property[i][0x08+offset:0x0C+offset])[0] self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + '].cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + '].cch has an abnormal value.') value = self.Property[i][0x0C+offset:0x0C+offset+cch*2].decode('utf-16') self.ole_logger.debug('Property.GKPIDDSI_DOCPARTS.vtValue.rgString[' + str(j) + ']: ' + value.encode('hex')) offset = offset + 4 + cch*2 else: self._raise_exception('Property.GKPIDDSI_DOCPARTS.type has an abnormal value.') continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDDSI['GKPIDDSI_HEADINGPAIR']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.type: ' + str(hex(type))) if type != (PropertyType['VT_VECTOR'] | PropertyType['VT_VARIANT']): self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.type has an abnormal value.') celements = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.cElements: ' + str(hex(celements))) offset = 0 for j in range(0, celements/2): strtype = struct.unpack('<H', self.Property[i][0x08+offset:0x0A+offset])[0] self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString.type: ' + str(hex(strtype))) cch = struct.unpack('<I', self.Property[i][0x0C+offset:0x10+offset])[0] self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString.cch has an abnormal value.') if strtype == PropertyType['VT_LPSTR']: value = self.Property[i][0x10+offset:0x10+offset+cch] self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString: ' + value) partstype = struct.unpack('<H', self.Property[i][0x10+offset+cch:0x10+offset+cch+0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts.type: ' + str(hex(partstype))) if partstype != PropertyType['VT_I4']: self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts.type has an abnormal value.') parts = struct.unpack('<I', self.Property[i][0x10+offset+cch+0x04:0x10+offset+cch+0x08])[0] 
self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts: ' + str(hex(parts))) offset = offset + 0x10 + cch elif strtype == PropertyType['VT_LPWSTR']: value = self.Property[i][0x10+offset:0x10+offset+cch*2].decode('utf-16') self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString: ' + value) partstype = struct.unpack('<H', self.Property[i][0x10+offset+cch*2:0x10+offset+cch*2+0x02])[0] self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts.type: ' + str(hex(partstype))) if partstype != PropertyType['VT_I4']: self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts.type has an abnormal value.') parts = struct.unpack('<I', self.Property[i][0x10+offset+cch*2+0x04:0x10+offset+cch*2+0x08])[0] self.ole_logger.debug('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headerParts: ' + str(hex(parts))) offset = offset + 0x10 + cch*2 else: self._raise_exception('Property.GKPIDDSI_HEADINGPAIR.vtValue.rgHeadingPairs[' + str(j) + '].headingString.type has an abnormal value.') continue class DocSummaryInfo(OLEBase): byteOrder = 0 version = 0 sysId = 0 OSMajorVersion = 0 OSMinorVersion = 0 OSType = 0 applicationClsid = '' cSections = 0 formatId1 = '' sectionOffset1 = 0 formatId2 = '' sectionOffset2 = 0 DocumentSummaryInfoPropertySet = None def __init__(self, data): self.byteOrder = 0 self.version = 0 self.sysId = 0 self.OSMajorVersion = 0 self.OSMinorVersion = 0 self.OSType = 0 self.applicationClsid = '' self.cSections = 0 self.formatId1 = '' self.sectionOffset1 = 0 self.formatId2 = '' self.sectionOffset2 = 0 self.DocumentSummaryInfoPropertySet = None self.ole_logger.debug('######## DocumentSummaryInfo ########') self.byteOrder = struct.unpack('<H', data[0x00:0x02])[0] self.ole_logger.debug('DocumentSummaryInfo.byteOrder: ' + str(hex(self.byteOrder))) if self.byteOrder != 0xFFFE: self._raise_exception('DocumentSummaryInfo.byteOrder has an abnormal value.') self.version = struct.unpack('<H', data[0x02:0x04])[0] self.ole_logger.debug('DocumentSummaryInfo.version: ' + str(hex(self.version))) if self.version != 0 and self.version != 1: self._raise_exception('DocumentSummaryInfo.version has an abnormal value.') self.sysId = struct.unpack('<I', data[0x04:0x08])[0] self.OSMajorVersion = ord(data[0x04]) self.ole_logger.debug('DocumentSummaryInfo.sysId.OSMajorVersion: ' + str(hex(self.OSMajorVersion))) self.OSMinorVersion = ord(data[0x05]) self.ole_logger.debug('DocumentSummaryInfo.sysId.OSMinorVersion: ' + str(hex(self.OSMinorVersion))) self.OSType = struct.unpack('<H', data[0x06:0x08])[0] self.ole_logger.debug('DocumentSummaryInfo.sysId.OSType: ' + str(hex(self.OSType))) self.applicationClsid = data[0x08:0x18] self.ole_logger.debug('DocumentSummaryInfo.applicationClsid: ' + self.applicationClsid.encode('hex')) if self.applicationClsid != '\x00' * 0x10: self._raise_exception('DocumentSummaryInfo.applicationClsid has an abnormal value.') self.cSections = struct.unpack('<I', data[0x18:0x1C])[0] self.ole_logger.debug('DocumentSummaryInfo.cSections: ' + str(hex(self.cSections))) if self.cSections != 1 and self.cSections != 2: self._raise_exception('DocumentSummaryInfo.cSections has an abnormal value.') self.formatId1 = data[0x1C:0x2C] self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-1.formatId: ' + self.formatId1.encode('hex')) if self.formatId1 != 
'\x02\xD5\xCD\xD5\x9C\x2E\x1B\x10\x93\x97\x08\x00\x2B\x2C\xF9\xAE': self._raise_exception('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-1.formatId has an abnormal value.') self.sectionOffset1 = struct.unpack('<I', data[0x2C:0x30])[0] self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-1.sectionOffset: ' + str(hex(self.sectionOffset1))) if self.cSections == 2: self.formatId2 = data[0x30:0x40] self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-2.formatId: ' + self.formatId2.encode('hex')) if self.formatId2 != '\x05\xD5\xCD\xD5\x9C\x2E\x1B\x10\x93\x97\x08\x00\x2B\x2C\xF9\xAE': self._raise_exception('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-2.formatId has an abnormal value.') self.sectionOffset2 = struct.unpack('<I', data[0x40:0x44])[0] self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-2.sectionOffset: ' + str(hex(self.sectionOffset2))) self.DocumentSummaryInfoPropertySet = DocSummaryInfoPropertySet(data[self.sectionOffset1:]) class SummaryInfoPropertySet(OLEBase): Size = 0 NumProperties = 0 PropertyIdentifierAndOffset = list() Property = list() def __init__(self, data): self.Size = 0 self.NumProperties = 0 self.PropertyIdentifierAndOffset = list() self.Property = list() self.Size = struct.unpack('<I', data[0x00:0x04])[0] self.ole_logger.debug('SummaryInfoPropertySet.Size: ' + str(hex(self.Size))) self.NumProperties = struct.unpack('<I', data[0x04:0x08])[0] self.ole_logger.debug('SummaryInfoPropertySet.NumProperties: ' + str(hex(self.NumProperties))) for i in range(0, self.NumProperties): piao = PropertyIdentifierAndOffset(data[0x08+i*8:0x08+i*8+8]) self.PropertyIdentifierAndOffset.append(piao) for i in range(0, self.NumProperties): if (i+1) < self.NumProperties: if self.PropertyIdentifierAndOffset[i].Offset < self.PropertyIdentifierAndOffset[i+1].Offset: property = data[self.PropertyIdentifierAndOffset[i].Offset:self.PropertyIdentifierAndOffset[i+1].Offset] else: self.ole_logger.warning('SummaryInfoPropertySet.PropertyIdentifierAndOffset.Offset is not in increasing order.') property = data[self.PropertyIdentifierAndOffset[i].Offset:self.Size] else: property = data[self.PropertyIdentifierAndOffset[i].Offset:self.Size] self.Property.append(property) for i in range(0, self.NumProperties): if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_CODEPAGE']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_CODEPAGE.type: ' + str(hex(type))) if type != PropertyType['VT_I2']: self._raise_exception('Property.PIDSI_CODEPAGE has an abnormal value.') codepage = struct.unpack('<H', self.Property[i][0x04:0x06])[0] self.ole_logger.debug('Property.PIDSI_CODEPAGE: ' + str(hex(codepage))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_TITLE']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_TITLE.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_TITLE.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_TITLE.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_TITLE has an abnormal value.') self.ole_logger.debug('Property.PIDSI_TITLE: ' + data) continue if 
self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_SUBJECT']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_SUBJECT.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_SUBJECT.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_SUBJECT.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_SUBJECT has an abnormal value.') self.ole_logger.debug('Property.PIDSI_SUBJECT: ' + data) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_AUTHOR']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_AUTHOR.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_AUTHOR.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_AUTHOR.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_AUTHOR has an abnormal value.') self.ole_logger.debug('Property.PIDSI_AUTHOR: ' + data) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_KEYWORDS']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_KEYWORDS.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_KEYWORDS.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_KEYWORDS.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_KEYWORDS has an abnormal value.') self.ole_logger.debug('Property.PIDSI_KEYWORDS: ' + data) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_COMMENTS']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_COMMENTS.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_COMMENTS.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_COMMENTS.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_COMMENTS has an abnormal value.') self.ole_logger.debug('Property.PIDSI_COMMENTS: ' + data) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_TEMPLATE']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_TEMPLATE.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_TEMPLATE.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_TEMPLATE.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] 
elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_TEMPLATE has an abnormal value.') self.ole_logger.debug('Property.PIDSI_TEMPLATE: ' + data) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_LASTAUTHOR']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_LASTAUTHOR.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_LASTAUTHOR.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_LASTAUTHOR.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_LASTAUTHOR has an abnormal value.') self.ole_logger.debug('Property.PIDSI_LASTAUTHOR: ' + data) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_REVNUMBER']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_REVNUMBER.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_REVNUMBER.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_REVNUMBER.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_REVNUMBER has an abnormal value.') self.ole_logger.debug('Property.PIDSI_REVNUMBER: ' + data) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_APPNAME']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_APPNAME.type: ' + str(hex(type))) cch = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_APPNAME.cch: ' + str(hex(cch))) if cch > 0x0000FFFF: self._raise_exception('Property.PIDSI_APPNAME.cch has an abnormal value.') if type == PropertyType['VT_LPSTR']: data = self.Property[i][0x08:0x08+cch] elif type == PropertyType['VT_LPWSTR']: data = self.Property[i][0x08:0x08+cch*2].decode('utf-16') else: self._raise_exception('Property.PIDSI_APPNAME has an abnormal value.') self.ole_logger.debug('Property.PIDSI_APPNAME: ' + data) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_EDITTIME']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_APPNAME.type: ' + str(hex(type))) if type != PropertyType['VT_FILETIME']: self._raise_exception('Property.PIDSI_EDITTIME has an abnormal value.') time = struct.unpack('<Q', self.Property[i][0x04:0x0C])[0] self.ole_logger.debug('Property.PIDSI_EDITTIME: ' + str(hex(time))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_LASTPRINTED']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_LASTPRINTED.type: ' + str(hex(type))) if type != PropertyType['VT_FILETIME']: self._raise_exception('Property.PIDSI_LASTPRINTED has an abnormal value.') time = struct.unpack('<Q', self.Property[i][0x04:0x0C])[0] self.ole_logger.debug('Property.PIDSI_LASTPRINTED: ' + self._filetime_to_datetime(time)) continue if 
self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_CREATE_DTM']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_CREATE_DTM.type: ' + str(hex(type))) if type != PropertyType['VT_FILETIME']: self._raise_exception('Property.PIDSI_CREATE_DTM has an abnormal value.') time = struct.unpack('<Q', self.Property[i][0x04:0x0C])[0] self.ole_logger.debug('Property.PIDSI_CREATE_DTM: ' + self._filetime_to_datetime(time)) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_LASTSAVE_DTM']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_LASTSAVE_DTM.type: ' + str(hex(type))) if type != PropertyType['VT_FILETIME']: self._raise_exception('Property.PIDSI_LASTSAVE_DTM has an abnormal value.') time = struct.unpack('<Q', self.Property[i][0x04:0x0C])[0] self.ole_logger.debug('Property.PIDSI_LASTSAVE_DTM: ' + self._filetime_to_datetime(time)) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_PAGECOUNT']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_PAGECOUNT.type: ' + str(hex(type))) if type != PropertyType['VT_I4']: self._raise_exception('Property.PIDSI_PAGECOUNT has an abnormal value.') count = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_PAGECOUNT: ' + str(hex(count))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_WORDCOUNT']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_WORDCOUNT.type: ' + str(hex(type))) if type != PropertyType['VT_I4']: self._raise_exception('Property.PIDSI_WORDCOUNT has an abnormal value.') count = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_WORDCOUNT: ' + str(hex(count))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_CHARCOUNT']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_CHARCOUNT.type: ' + str(hex(type))) if type != PropertyType['VT_I4']: self._raise_exception('Property.PIDSI_CHARCOUNT has an abnormal value.') count = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_CHARCOUNT: ' + str(hex(count))) continue if self.PropertyIdentifierAndOffset[i].PropertyIdentifier == PIDSI['PIDSI_DOC_SECURITY']: type = struct.unpack('<H', self.Property[i][0x00:0x02])[0] self.ole_logger.debug('Property.PIDSI_DOC_SECURITY.type: ' + str(hex(type))) if type != PropertyType['VT_I4']: self._raise_exception('Property.PIDSI_DOC_SECURITY has an abnormal value.') security = struct.unpack('<I', self.Property[i][0x04:0x08])[0] self.ole_logger.debug('Property.PIDSI_DOC_SECURITY: ' + str(hex(security))) continue class SummaryInfo(OLEBase): byteOrder = 0 version = 0 sysId = 0 OSMajorVersion = 0 OSMinorVersion = 0 OSType = 0 applicationClsid = '' cSections = 0 formatId1 = '' sectionOffset1 = 0 formatId2 = '' sectionOffset2 = 0 SummaryInfoPropertySet = None def __init__(self, data): self.byteOrder = 0 self.version = 0 self.sysId = 0 self.OSMajorVersion = 0 self.OSMinorVersion = 0 self.OSType = 0 self.applicationClsid = '' self.cSections = 0 self.formatId1 = '' self.sectionOffset1 = 0 self.formatId2 = '' self.sectionOffset2 = 0 self.SummaryInfoPropertySet = None self.ole_logger.debug('######## SummaryInfo ########') self.byteOrder = struct.unpack('<H', data[0x00:0x02])[0] 
self.ole_logger.debug('SummaryInfo.byteOrder: ' + str(hex(self.byteOrder))) if self.byteOrder != 0xFFFE: self._raise_exception('DocumentSummaryInfo.byteOrder has an abnormal value.') self.version = struct.unpack('<H', data[0x02:0x04])[0] self.ole_logger.debug('SummaryInfo.version: ' + str(hex(self.version))) if self.version != 0 and self.version != 1: self._raise_exception('SummaryInfo.version has an abnormal value.') self.sysId = struct.unpack('<I', data[0x04:0x08])[0] self.ole_logger.debug('SummaryInfo.sysId: ' + str(hex(self.sysId))) self.clsid = data[0x08:0x18] self.ole_logger.debug('SummaryInfo.clsid: ' + self.clsid.encode('hex')) if self.clsid != '\x00' * 0x10: self._raise_exception('SummaryInfo.clsid has an abnormal value.') self.cSections = struct.unpack('<I', data[0x18:0x1C])[0] self.ole_logger.debug('SummaryInfo.cSections: ' + str(hex(self.cSections))) if self.cSections != 1 and self.cSections != 2: self._raise_exception('SummaryInfo.cSections has an abnormal value.') self.formatId1 = data[0x1C:0x2C] self.ole_logger.debug('SummaryInfo.rgIdOffset.IdOffsetElement-1.formatId: ' + self.formatId1.encode('hex')) if self.formatId1 != '\xE0\x85\x9F\xF2\xF9\x4F\x68\x10\xAB\x91\x08\x00\x2B\x27\xB3\xD9': self._raise_exception('SummaryInfo.rgIdOffset.IdOffsetElement-1.formatId has an abnormal value.') self.sectionOffset1 = struct.unpack('<I', data[0x2C:0x30])[0] self.ole_logger.debug('DocumentSummaryInfo.rgIdOffset.IdOffsetElement-1.sectionOffset: ' + str(hex(self.sectionOffset1))) if self.cSections == 2: self.formatId2 = data[0x30:0x40] self.ole_logger.debug('SummaryInfo.rgIdOffset.IdOffsetElement-2.formatId: ' + self.formatId2.encode('hex')) if self.formatId2 != '\x05\xD5\xCD\xD5\x9C\x2E\x1B\x10\x93\x97\x08\x00\x2B\x2C\xF9\xAE': self._raise_exception('SummaryInfo.rgIdOffset.IdOffsetElement-2.formatId has an abnormal value.') self.sectionOffset2 = struct.unpack('<I', data[0x40:0x44])[0] self.ole_logger.debug('SummaryInfo.rgIdOffset.IdOffsetElement-2.sectionOffset: ' + str(hex(self.sectionOffset2))) self.SummaryInfoPropertySet = SummaryInfoPropertySet(data[self.sectionOffset1:]) class OLEFile(OLEBase): file_data = None sector_size = 0 mini_sector_size = 0 OLEHeader = None DIFAT = list() FAT = list() MiniFAT = list() Directory = list() SummaryInfo = None DocumentSummaryInfo = None def __init__(self, filename): self.file_data = None self.sector_size = 0 self.mini_sector_size = 0 self.OLEHeader = None self.DIFAT = list() self.FAT = list() self.MiniFAT = list() self.Directory = list() self.SummaryInfo = None self.DocumentSummaryInfo = None if os.path.isfile(filename): self.file_data = open(filename, 'rb').read() self.ole_logger.debug('Load file: ' + filename) self.OLEHeader = OLEHeader(self.file_data) if self.OLEHeader.SectorShift == 0x09: self.sector_size = 512 elif self.OLEHeader.SectorShift == 0x0C: self.sector_size = 4096 else: self._raise_exception('Invalid Sector Size.') if self.OLEHeader.MiniSectorShift == 0x06: self.mini_sector_size = 64 else: self._raise_exception('Invalid MiniSector Size.') self._init_fat_chain() if self.OLEHeader.NumberOfMiniFATSectors > 0: self._init_minifat_chain() self._init_dir_entry() for i in range(0, len(self.Directory)): if self.Directory[i].Name == '\x05SummaryInformation': self.SummaryInfo = SummaryInfo(self.find_object_by_index(i)) if self.Directory[i].Name == '\x05DocumentSummaryInformation': self.DocumentSummaryInfo = DocSummaryInfo(self.find_object_by_index(i)) else: self._raise_exception('Invalid file: ' + filename) def _init_fat_chain(self): self.DIFAT 
= list(self.OLEHeader.DIFAT) if self.OLEHeader.NumberOfDIFATSectors > 0: difat_sector_index = self.OLEHeader.FirstDIFATSector for i in range(0, self.OLEHeader.NumberOfDIFATSectors): difat_sector_offset = (difat_sector_index+1) * self.sector_size self.ole_logger.debug('DIFAT sector #' + str(i) + ' at offset: ' + str(hex(difat_sector_offset))) for j in range(0, self.sector_size/4-1): difat = struct.unpack('<I', self.file_data[difat_sector_offset+j*4:difat_sector_offset+j*4+4])[0] if difat == 0xFFFFFFFF: if i+1 == self.OLEHeader.NumberOfDIFATSectors: break else: self._raise_exception('Encountered an invalid DIFAT value when parsing the DIFAT chain.') self.ole_logger.debug('DIFAT[' + str(len(self.DIFAT)) + ']: ' + str(hex(difat))) self.DIFAT.append(difat) difat_sector_index = struct.unpack('<I', self.file_data[difat_sector_offset+j*4:difat_sector_offset+j*4+4])[0] if len(self.DIFAT) != self.OLEHeader.NumberOfFATSectors: self.ole_logger.warn('OLEHeader.NumberOfFATSectors does not match the number of DIFAT entries.') for i in range(0, self.OLEHeader.NumberOfFATSectors): fat_sector_index = self.DIFAT[i] fat_sector_offset = (fat_sector_index+1) * self.sector_size self.ole_logger.debug('FAT sector #' + str(i) + ' at offset: ' + str(hex(fat_sector_offset))) for j in range(0, self.sector_size/4): fat = struct.unpack('<I', self.file_data[fat_sector_offset+j*4:fat_sector_offset+j*4+4])[0] self.FAT.append(fat) if fat == 0xFFFFFFFC: self.ole_logger.debug('FAT[' + str(len(self.FAT)-1) + '] is a DIFAT sector') if fat == 0xFFFFFFFD: self.ole_logger.debug('FAT[' + str(len(self.FAT)-1) + '] is a FAT sector') def _init_minifat_chain(self): minifat_sector_index = self.OLEHeader.FirstMiniFATSector i = 0 while i < self.OLEHeader.NumberOfMiniFATSectors: minifat_sector_offset = (minifat_sector_index+1) * self.sector_size self.ole_logger.debug('MiniFAT sector #' + str(i) + ' at offset: ' + str(hex(minifat_sector_offset))) for j in range(0, self.sector_size/4): minifat = struct.unpack('<I', self.file_data[minifat_sector_offset+j*4:minifat_sector_offset+j*4+4])[0] self.MiniFAT.append(minifat) minifat_sector_index = self.FAT[minifat_sector_index] if minifat_sector_index == 0xFFFFFFFE: self.ole_logger.debug('MiniFAT sector chain ended.') break i += 1 if (i+1) != self.OLEHeader.NumberOfMiniFATSectors: self.ole_logger.warn('OLEHeader.NumberOfMiniFATSectors does not match the length of the MiniFAT sector chain.') def _init_dir_entry(self): dir_sector_index = self.OLEHeader.FirstDirecotrySector is_end = False while True: dir_sector_offset = (dir_sector_index+1) * self.sector_size for i in range(0, self.sector_size/128): if (dir_sector_offset+i*128+128) > len(self.file_data): self.ole_logger.warning('Directory sector offset larger than file size.') is_end = True break dir_data = self.file_data[dir_sector_offset+i*128:dir_sector_offset+i*128+128] if struct.unpack('<H', dir_data[0x40:0x42])[0] == 0: is_end = True break self.ole_logger.debug('[----- Directory #' + str(len(self.Directory)) + ' -----]') try: directory = Directory(dir_data) self.Directory.append(directory) except: self.ole_logger.debug('Directory #' + str(len(self.Directory)) + ' contains abnormal structure.') dir_sector_index = self.FAT[dir_sector_index] if is_end or dir_sector_index == 0xFFFFFFFE: break def find_object_by_name(self, name): data = '' dir_number = len(self.Directory) for i in range(0, dir_number): directory = self.Directory[i] if name == directory.Name: if directory.ObjectType != 0x02 and directory.ObjectType != 0x05: return directory sector_index =
directory.StartingSector if sector_index == 0xFFFFFFFE: self.ole_logger.debug('Object: ' + name + ' has no data.') return None if directory.StreamSize < self.OLEHeader.MiniStreamCutoffSize and len(self.MiniFAT) > 0 and name != 'Root Entry': ministream = self.find_object_by_name('Root Entry') if len(ministream) > 0: while sector_index != 0xFFFFFFFE: sector_offset = sector_index * 0x40 data += ministream[sector_offset:sector_offset+0x40] sector_index = self.MiniFAT[sector_index] else: self.ole_logger.debug('Mini Stream is null.') return None else: while sector_index != 0xFFFFFFFE: sector_offset = (sector_index+1) * self.sector_size data += self.file_data[sector_offset:sector_offset+self.sector_size] sector_index = self.FAT[sector_index] break if (i+1) == dir_number: self.ole_logger.debug('Could not find object: ' + name) return None if directory.StreamSize > len(data): self.ole_logger.warn('DirectoryEntry.StreamSize larger than real data size.') return None return data[0: directory.StreamSize] def find_object_by_index(self, index): data = '' if index < 0 or index >= len(self.Directory): self.ole_logger.warn('Index out of boundary.') return None directory = self.Directory[index] if directory.ObjectType != 0x02 and directory.ObjectType != 0x05: return directory sector_index = directory.StartingSector if sector_index == 0xFFFFFFFE: self.ole_logger.debug('Object #' + str(index) + ' has no data.') return None if directory.StreamSize < self.OLEHeader.MiniStreamCutoffSize and len(self.MiniFAT) > 0: ministream = self.find_object_by_name('Root Entry') if len(ministream) > 0: while sector_index != 0xFFFFFFFE: sector_offset = sector_index * 0x40 data += ministream[sector_offset:sector_offset+0x40] sector_index = self.MiniFAT[sector_index] else: self.ole_logger.debug('Mini Stream is null.') return None else: while sector_index != 0xFFFFFFFE: sector_offset = (sector_index+1) * self.sector_size data += self.file_data[sector_offset:sector_offset+self.sector_size] sector_index = self.FAT[sector_index] if directory.StreamSize > len(data): self.ole_logger.warn('DirectoryEntry.StreamSize larger than real data size.') return None return data[0: directory.StreamSize] if __name__ == '__main__': debug = True init_logging(debug)
z3r0zh0u/pyole
pyole.py
Python
mit
60,571
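# ---------------------------------------------------------------------------
# Hedged usage sketch for the OLE parser in pyole.py above. The class name
# "OLEFile" and its filename-based constructor are assumptions -- they are not
# part of this excerpt -- while Directory, Name, ObjectType and
# find_object_by_name() come straight from the methods shown. Per that code,
# streams below OLEHeader.MiniStreamCutoffSize are read from the mini stream
# via the MiniFAT chain, and larger streams from the regular FAT chain.
from pyole import OLEFile, init_logging   # assumed module-level names

init_logging(True)                        # the debug logger used by the parser
ole = OLEFile('sample.doc')               # assumed: parses header, DIFAT/FAT, directory

for i, entry in enumerate(ole.Directory):
    # ObjectType 0x02 = stream, 0x05 = root storage (the values checked above)
    print('#%d %s (type 0x%02x)' % (i, entry.Name, entry.ObjectType))

data = ole.find_object_by_name('WordDocument')
if data is not None:
    print('WordDocument stream: %d bytes' % len(data))
# ---------------------------------------------------------------------------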
# Copyright 2011 Google Inc. All Rights Reserved. """Locked file interface that should work on Unix and Windows pythons. This module first tries to use fcntl locking to ensure serialized access to a file, then falls back on a lock file if that is unavialable. Usage: f = LockedFile('filename', 'r+b', 'rb') f.open_and_lock() if f.is_locked(): print 'Acquired filename with r+b mode' f.file_handle().write('locked data') else: print 'Aquired filename with rb mode' f.unlock_and_close() """ __author__ = '[email protected] (David T McWherter)' import errno import logging import os import time from oauth2client import util logger = logging.getLogger(__name__) class AlreadyLockedException(Exception): """Trying to lock a file that has already been locked by the LockedFile.""" pass class _Opener(object): """Base class for different locking primitives.""" def __init__(self, filename, mode, fallback_mode): """Create an Opener. Args: filename: string, The pathname of the file. mode: string, The preferred mode to access the file with. fallback_mode: string, The mode to use if locking fails. """ self._locked = False self._filename = filename self._mode = mode self._fallback_mode = fallback_mode self._fh = None def is_locked(self): """Was the file locked.""" return self._locked def file_handle(self): """The file handle to the file. Valid only after opened.""" return self._fh def filename(self): """The filename that is being locked.""" return self._filename def open_and_lock(self, timeout, delay): """Open the file and lock it. Args: timeout: float, How long to try to lock for. delay: float, How long to wait between retries. """ pass def unlock_and_close(self): """Unlock and close the file.""" pass class _PosixOpener(_Opener): """Lock files using Posix advisory lock files.""" def open_and_lock(self, timeout, delay): """Open the file and lock it. Tries to create a .lock file next to the file we're trying to open. Args: timeout: float, How long to try to lock for. delay: float, How long to wait between retries. Raises: AlreadyLockedException: if the lock is already acquired. IOError: if the open fails. """ if self._locked: raise AlreadyLockedException('File %s is already locked' % self._filename) self._locked = False try: self._fh = open(self._filename, self._mode) except IOError as e: # If we can't access with _mode, try _fallback_mode and don't lock. if e.errno == errno.EACCES: self._fh = open(self._filename, self._fallback_mode) return lock_filename = self._posix_lockfile(self._filename) start_time = time.time() while True: try: self._lock_fd = os.open(lock_filename, os.O_CREAT|os.O_EXCL|os.O_RDWR) self._locked = True break except OSError as e: if e.errno != errno.EEXIST: raise if (time.time() - start_time) >= timeout: logger.warn('Could not acquire lock %s in %s seconds' % ( lock_filename, timeout)) # Close the file and open in fallback_mode. 
if self._fh: self._fh.close() self._fh = open(self._filename, self._fallback_mode) return time.sleep(delay) def unlock_and_close(self): """Unlock a file by removing the .lock file, and close the handle.""" if self._locked: lock_filename = self._posix_lockfile(self._filename) os.unlink(lock_filename) os.close(self._lock_fd) self._locked = False self._lock_fd = None if self._fh: self._fh.close() def _posix_lockfile(self, filename): """The name of the lock file to use for posix locking.""" return '%s.lock' % filename try: import fcntl class _FcntlOpener(_Opener): """Open, lock, and unlock a file using fcntl.lockf.""" def open_and_lock(self, timeout, delay): """Open the file and lock it. Args: timeout: float, How long to try to lock for. delay: float, How long to wait between retries Raises: AlreadyLockedException: if the lock is already acquired. IOError: if the open fails. """ if self._locked: raise AlreadyLockedException('File %s is already locked' % self._filename) start_time = time.time() try: self._fh = open(self._filename, self._mode) except IOError as e: # If we can't access with _mode, try _fallback_mode and don't lock. if e.errno == errno.EACCES: self._fh = open(self._filename, self._fallback_mode) return # We opened in _mode, try to lock the file. while True: try: fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX) self._locked = True return except IOError as e: # If not retrying, then just pass on the error. if timeout == 0: raise e if e.errno != errno.EACCES: raise e # We could not acquire the lock. Try again. if (time.time() - start_time) >= timeout: logger.warn('Could not lock %s in %s seconds' % ( self._filename, timeout)) if self._fh: self._fh.close() self._fh = open(self._filename, self._fallback_mode) return time.sleep(delay) def unlock_and_close(self): """Close and unlock the file using the fcntl.lockf primitive.""" if self._locked: fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN) self._locked = False if self._fh: self._fh.close() except ImportError: _FcntlOpener = None try: import pywintypes import win32con import win32file class _Win32Opener(_Opener): """Open, lock, and unlock a file using windows primitives.""" # Error #33: # 'The process cannot access the file because another process' FILE_IN_USE_ERROR = 33 # Error #158: # 'The segment is already unlocked.' FILE_ALREADY_UNLOCKED_ERROR = 158 def open_and_lock(self, timeout, delay): """Open the file and lock it. Args: timeout: float, How long to try to lock for. delay: float, How long to wait between retries Raises: AlreadyLockedException: if the lock is already acquired. IOError: if the open fails. """ if self._locked: raise AlreadyLockedException('File %s is already locked' % self._filename) start_time = time.time() try: self._fh = open(self._filename, self._mode) except IOError as e: # If we can't access with _mode, try _fallback_mode and don't lock. if e.errno == errno.EACCES: self._fh = open(self._filename, self._fallback_mode) return # We opened in _mode, try to lock the file. while True: try: hfile = win32file._get_osfhandle(self._fh.fileno()) win32file.LockFileEx( hfile, (win32con.LOCKFILE_FAIL_IMMEDIATELY| win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000, pywintypes.OVERLAPPED()) self._locked = True return except pywintypes.error as e: if timeout == 0: raise e # If the error is not that the file is already in use, raise. if e[0] != _Win32Opener.FILE_IN_USE_ERROR: raise # We could not acquire the lock. Try again. 
if (time.time() - start_time) >= timeout: logger.warn('Could not lock %s in %s seconds' % ( self._filename, timeout)) if self._fh: self._fh.close() self._fh = open(self._filename, self._fallback_mode) return time.sleep(delay) def unlock_and_close(self): """Close and unlock the file using the win32 primitive.""" if self._locked: try: hfile = win32file._get_osfhandle(self._fh.fileno()) win32file.UnlockFileEx(hfile, 0, -0x10000, pywintypes.OVERLAPPED()) except pywintypes.error as e: if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR: raise self._locked = False if self._fh: self._fh.close() except ImportError: _Win32Opener = None class LockedFile(object): """Represent a file that has exclusive access.""" @util.positional(4) def __init__(self, filename, mode, fallback_mode, use_native_locking=True): """Construct a LockedFile. Args: filename: string, The path of the file to open. mode: string, The mode to try to open the file with. fallback_mode: string, The mode to use if locking fails. use_native_locking: bool, Whether or not fcntl/win32 locking is used. """ opener = None if not opener and use_native_locking: if _Win32Opener: opener = _Win32Opener(filename, mode, fallback_mode) if _FcntlOpener: opener = _FcntlOpener(filename, mode, fallback_mode) if not opener: opener = _PosixOpener(filename, mode, fallback_mode) self._opener = opener def filename(self): """Return the filename we were constructed with.""" return self._opener._filename def file_handle(self): """Return the file_handle to the opened file.""" return self._opener.file_handle() def is_locked(self): """Return whether we successfully locked the file.""" return self._opener.is_locked() def open_and_lock(self, timeout=0, delay=0.05): """Open the file, trying to lock it. Args: timeout: float, The number of seconds to try to acquire the lock. delay: float, The number of seconds to wait between retry attempts. Raises: AlreadyLockedException: if the lock is already acquired. IOError: if the open fails. """ self._opener.open_and_lock(timeout, delay) def unlock_and_close(self): """Unlock and close a file.""" self._opener.unlock_and_close()
samuelclay/NewsBlur
vendor/oauth2client/locked_file.py
Python
mit
10,284
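# ---------------------------------------------------------------------------
# Hedged usage sketch for the LockedFile wrapper above: open_and_lock() retries
# the lock for up to `timeout` seconds, sleeping `delay` seconds between
# attempts, and falls back to the read-only mode when the lock cannot be
# acquired in time or the file cannot be opened read-write. The filename below
# is illustrative; the import path follows the vendored location shown above.
from oauth2client.locked_file import LockedFile

f = LockedFile('credentials.json', 'r+b', 'rb')
f.open_and_lock(timeout=5, delay=0.1)      # retry for up to 5 seconds
try:
    if f.is_locked():
        # exclusive access: safe to rewrite the file in place
        f.file_handle().write('{"token": "..."}')
    else:
        # the lock was not acquired: we only hold a read-only fallback handle
        print('read-only fallback: %r' % f.file_handle().read())
finally:
    f.unlock_and_close()
# ---------------------------------------------------------------------------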
# -*- coding: utf-8 -*- """ sphinx.ext.autosummary.generate ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Usable as a library or script to generate automatic RST source files for items referred to in autosummary:: directives. Each generated RST file contains a single auto*:: directive which extracts the docstring of the referred item. Example Makefile rule:: generate: sphinx-autogen -o source/generated source/*.rst :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import re import sys import pydoc import optparse import inspect from jinja2 import FileSystemLoader, TemplateNotFound from jinja2.sandbox import SandboxedEnvironment from sphinx import package_dir from ..autosummary import import_by_name, get_documenter from sphinx.jinja2glue import BuiltinTemplateLoader from sphinx.util.osutil import ensuredir from sphinx.util.inspect import safe_getattr def main(argv=sys.argv): usage = """%prog [OPTIONS] SOURCEFILE ...""" p = optparse.OptionParser(usage.strip()) p.add_option("-o", "--output-dir", action="store", type="string", dest="output_dir", default=None, help="Directory to place all output in") p.add_option("-s", "--suffix", action="store", type="string", dest="suffix", default="rst", help="Default suffix for files (default: %default)") p.add_option("-t", "--templates", action="store", type="string", dest="templates", default=None, help="Custom template directory (default: %default)") options, args = p.parse_args(argv[1:]) if len(args) < 1: p.error('no input files given') generate_autosummary_docs(args, options.output_dir, "." + options.suffix, template_dir=options.templates) def _simple_info(msg): print msg def _simple_warn(msg): print >> sys.stderr, 'WARNING: ' + msg # -- Generating output --------------------------------------------------------- def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', warn=_simple_warn, info=_simple_info, base_path=None, builder=None, template_dir=None): showed_sources = list(sorted(sources)) if len(showed_sources) > 20: showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:] info('[autosummary] generating autosummary for: %s' % ', '.join(showed_sources)) if output_dir: info('[autosummary] writing to %s' % output_dir) if base_path is not None: sources = [os.path.join(base_path, filename) for filename in sources] # create our own templating environment template_dirs = [os.path.join(package_dir, 'ext', 'autosummary', 'templates')] if builder is not None: # allow the user to override the templates template_loader = BuiltinTemplateLoader() template_loader.init(builder, dirs=template_dirs) else: if template_dir: template_dirs.insert(0, template_dir) template_loader = FileSystemLoader(template_dirs) template_env = SandboxedEnvironment(loader=template_loader) # read items = find_autosummary_in_files(sources) # remove possible duplicates items = dict([(item, True) for item in items]).keys() # keep track of new files new_files = [] # write for name, path, template_name in sorted(items): if path is None: # The corresponding autosummary:: directive did not have # a :toctree: option continue path = output_dir or os.path.abspath(path) ensuredir(path) try: name, obj, parent = import_by_name(name) except ImportError, e: warn('[autosummary] failed to import %r: %s' % (name, e)) continue fn = os.path.join(path, name + suffix) # skip it if it exists if os.path.isfile(fn): continue new_files.append(fn) f = open(fn, 'w') try: doc = get_documenter(obj, parent) if template_name is not None: 
template = template_env.get_template(template_name) else: try: template = template_env.get_template('autosummary/%s.rst' % doc.objtype) except TemplateNotFound: template = template_env.get_template('autosummary/base.rst') def get_members(obj, typ, include_public=[]): items = [] for name in dir(obj): if sys.skip_member(name, obj): continue if typ in ['class', 'function']: c = getattr(obj, name) if inspect.isclass(c) or inspect.isfunction(c): if (c.__module__!=obj.__name__+".base" and c.__module__!=obj.__name__): continue try: documenter = get_documenter(safe_getattr(obj, name), obj) except AttributeError: continue if documenter.objtype == typ: items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public, items def def_members(obj, typ, include_public=[]): items = [] try: obj_dict = safe_getattr(obj, '__dict__') except AttributeError: return [] defined = obj_dict.keys() defined.sort() for name in defined: if sys.skip_member(name, obj): continue try: documenter = get_documenter(safe_getattr(obj, name), obj) except AttributeError: continue if documenter.objtype == typ: items.append(name) public = [x for x in items if x in include_public or not x.startswith('_')] return public ns = {} if doc.objtype == 'module': ns['all_members'] = dir(obj) ns['classes'], ns['all_classes'] = \ get_members(obj, 'class') ns['functions'], ns['all_functions'] = \ get_members(obj, 'function') ns['exceptions'], ns['all_exceptions'] = \ get_members(obj, 'exception') if sys.all_submodules.has_key(obj.__name__): ns['submodules'] = sys.all_submodules[obj.__name__] ns['members'] = ns['all_members'] try: obj_dict = safe_getattr(obj, '__dict__') except AttributeError: obj_dict = [] public = [x for x in obj_dict if not x.startswith('_')] for item in ns['classes']+ns['functions']+ns['exceptions']: if item in public: public.remove(item) public.sort() ns['members'] = public ns['constants'] = [x for x in public if not sys.skip_member(x, obj)] elif doc.objtype == 'class': ns['members'] = dir(obj) ns['events'], ns['all_events'] = \ get_members(obj, 'event') ns['methods'], ns['all_methods'] = \ get_members(obj, 'method', ['__init__']) ns['attributes'], ns['all_attributes'] = \ get_members(obj, 'attribute') ns['def_events'] = def_members(obj, 'event') ns['def_methods'] = def_members(obj, 'method', ['__init__']) ns['def_attributes'] = def_members(obj, 'attribute') ns['inherited'] = [] for t in ['events', 'methods', 'attributes']: key = 'inh_' + t ns[key]=[] for item in ns[t]: if not item in ns['def_' + t]: ns['inherited'].append(item) ns[key].append(item) parts = name.split('.') if doc.objtype in ('method', 'attribute'): mod_name = '.'.join(parts[:-2]) cls_name = parts[-2] obj_name = '.'.join(parts[-2:]) ns['class'] = cls_name else: mod_name, obj_name = '.'.join(parts[:-1]), parts[-1] ns['fullname'] = name ns['module'] = mod_name ns['objname'] = obj_name ns['name'] = parts[-1] ns['objtype'] = doc.objtype ns['underline'] = len(name) * '=' rendered = template.render(**ns) f.write(rendered) finally: f.close() # descend recursively to new files if new_files: generate_autosummary_docs(new_files, output_dir=output_dir, suffix=suffix, warn=warn, info=info, base_path=base_path, builder=builder, template_dir=template_dir) # -- Finding documented entries in files --------------------------------------- def find_autosummary_in_files(filenames): """Find out what items are documented in source/*.rst. See `find_autosummary_in_lines`. 
""" documented = [] for filename in filenames: f = open(filename, 'r') lines = f.read().splitlines() documented.extend(find_autosummary_in_lines(lines, filename=filename)) f.close() return documented def find_autosummary_in_docstring(name, module=None, filename=None): """Find out what items are documented in the given object's docstring. See `find_autosummary_in_lines`. """ try: real_name, obj, parent = import_by_name(name) lines = pydoc.getdoc(obj).splitlines() return find_autosummary_in_lines(lines, module=name, filename=filename) except AttributeError: pass except ImportError, e: print "Failed to import '%s': %s" % (name, e) return [] def find_autosummary_in_lines(lines, module=None, filename=None): """Find out what items appear in autosummary:: directives in the given lines. Returns a list of (name, toctree, template) where *name* is a name of an object and *toctree* the :toctree: path of the corresponding autosummary directive (relative to the root of the file name), and *template* the value of the :template: option. *toctree* and *template* ``None`` if the directive does not have the corresponding options set. """ autosummary_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*') automodule_re = re.compile( r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$') module_re = re.compile( r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?') toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$') documented = [] toctree = None template = None current_module = module in_autosummary = False base_indent = "" for line in lines: if in_autosummary: m = toctree_arg_re.match(line) if m: toctree = m.group(1) if filename: toctree = os.path.join(os.path.dirname(filename), toctree) continue m = template_arg_re.match(line) if m: template = m.group(1).strip() continue if line.strip().startswith(':'): continue # skip options m = autosummary_item_re.match(line) if m: name = m.group(1).strip() if name.startswith('~'): name = name[1:] if current_module and \ not name.startswith(current_module + '.'): name = "%s.%s" % (current_module, name) documented.append((name, toctree, template)) continue if not line.strip() or line.startswith(base_indent + " "): continue in_autosummary = False m = autosummary_re.match(line) if m: in_autosummary = True base_indent = m.group(1) toctree = None template = None continue m = automodule_re.search(line) if m: current_module = m.group(1).strip() # recurse into the automodule docstring documented.extend(find_autosummary_in_docstring( current_module, filename=filename)) continue m = module_re.match(line) if m: current_module = m.group(2) continue return documented if __name__ == '__main__': main()
shadowmint/nwidget
lib/pyglet-1.4.4/doc/ext/autosummary/generate.py
Python
apache-2.0
13,676
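# ---------------------------------------------------------------------------
# Hedged sketch of what the autosummary scanner above extracts from RST source.
# find_autosummary_in_lines() returns (name, toctree, template) tuples; names
# are qualified against the current module and a leading "~" is stripped, as in
# the code above. Importing the module as plain "generate" is an assumption
# based on the doc/ext layout shown.
from generate import find_autosummary_in_lines   # assumed import path

rst = """
.. currentmodule:: pyglet.clock

.. autosummary::
   :toctree: generated/

   Clock
   ~ClockDisplay.draw
""".splitlines()

for name, toctree, template in find_autosummary_in_lines(rst):
    print('%s  %s  %s' % (name, toctree, template))
# Expected, roughly:
#   pyglet.clock.Clock              generated/  None
#   pyglet.clock.ClockDisplay.draw  generated/  None
# ---------------------------------------------------------------------------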
from libra.repository.mongodb.mongodb import Repository
from libra.repository.mongodb.orm import Collection, Property, PropertyDict

__all__ = (
    "Repository",
    "Collection",
    "Property",
    "PropertyDict"
)
pitomba/libra
libra/repository/__init__.py
Python
mit
218
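# ---------------------------------------------------------------------------
# Hedged sketch: thanks to the re-exports in the __init__ above, callers can
# import the MongoDB ORM pieces from the package root instead of the deep
# module paths; both names below resolve to the same class.
from libra.repository import Repository, Collection, Property, PropertyDict
from libra.repository.mongodb.orm import Collection as _Collection

assert Collection is _Collection   # same object, two import paths
# ---------------------------------------------------------------------------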
# orm/util.py # Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php import re import types import weakref from . import attributes # noqa from .base import _class_to_mapper # noqa from .base import _never_set # noqa from .base import _none_set # noqa from .base import attribute_str # noqa from .base import class_mapper # noqa from .base import InspectionAttr # noqa from .base import instance_str # noqa from .base import object_mapper # noqa from .base import object_state # noqa from .base import state_attribute_str # noqa from .base import state_class_str # noqa from .base import state_str # noqa from .interfaces import CriteriaOption from .interfaces import MapperProperty # noqa from .interfaces import ORMColumnsClauseRole from .interfaces import ORMEntityColumnsClauseRole from .interfaces import ORMFromClauseRole from .interfaces import PropComparator # noqa from .path_registry import PathRegistry # noqa from .. import event from .. import exc as sa_exc from .. import inspection from .. import sql from .. import util from ..engine.result import result_tuple from ..sql import base as sql_base from ..sql import coercions from ..sql import expression from ..sql import lambdas from ..sql import roles from ..sql import util as sql_util from ..sql import visitors from ..sql.annotation import SupportsCloneAnnotations from ..sql.base import ColumnCollection all_cascades = frozenset( ( "delete", "delete-orphan", "all", "merge", "expunge", "save-update", "refresh-expire", "none", ) ) class CascadeOptions(frozenset): """Keeps track of the options sent to :paramref:`.relationship.cascade`""" _add_w_all_cascades = all_cascades.difference( ["all", "none", "delete-orphan"] ) _allowed_cascades = all_cascades _viewonly_cascades = ["expunge", "all", "none", "refresh-expire"] __slots__ = ( "save_update", "delete", "refresh_expire", "merge", "expunge", "delete_orphan", ) def __new__(cls, value_list): if isinstance(value_list, util.string_types) or value_list is None: return cls.from_string(value_list) values = set(value_list) if values.difference(cls._allowed_cascades): raise sa_exc.ArgumentError( "Invalid cascade option(s): %s" % ", ".join( [ repr(x) for x in sorted( values.difference(cls._allowed_cascades) ) ] ) ) if "all" in values: values.update(cls._add_w_all_cascades) if "none" in values: values.clear() values.discard("all") self = frozenset.__new__(CascadeOptions, values) self.save_update = "save-update" in values self.delete = "delete" in values self.refresh_expire = "refresh-expire" in values self.merge = "merge" in values self.expunge = "expunge" in values self.delete_orphan = "delete-orphan" in values if self.delete_orphan and not self.delete: util.warn( "The 'delete-orphan' cascade " "option requires 'delete'." ) return self def __repr__(self): return "CascadeOptions(%r)" % (",".join([x for x in sorted(self)])) @classmethod def from_string(cls, arg): values = [c for c in re.split(r"\s*,\s*", arg or "") if c] return cls(values) def _validator_events(desc, key, validator, include_removes, include_backrefs): """Runs a validation method on an attribute value to be set or appended. 
""" if not include_backrefs: def detect_is_backref(state, initiator): impl = state.manager[key].impl return initiator.impl is not impl if include_removes: def append(state, value, initiator): if initiator.op is not attributes.OP_BULK_REPLACE and ( include_backrefs or not detect_is_backref(state, initiator) ): return validator(state.obj(), key, value, False) else: return value def bulk_set(state, values, initiator): if include_backrefs or not detect_is_backref(state, initiator): obj = state.obj() values[:] = [ validator(obj, key, value, False) for value in values ] def set_(state, value, oldvalue, initiator): if include_backrefs or not detect_is_backref(state, initiator): return validator(state.obj(), key, value, False) else: return value def remove(state, value, initiator): if include_backrefs or not detect_is_backref(state, initiator): validator(state.obj(), key, value, True) else: def append(state, value, initiator): if initiator.op is not attributes.OP_BULK_REPLACE and ( include_backrefs or not detect_is_backref(state, initiator) ): return validator(state.obj(), key, value) else: return value def bulk_set(state, values, initiator): if include_backrefs or not detect_is_backref(state, initiator): obj = state.obj() values[:] = [validator(obj, key, value) for value in values] def set_(state, value, oldvalue, initiator): if include_backrefs or not detect_is_backref(state, initiator): return validator(state.obj(), key, value) else: return value event.listen(desc, "append", append, raw=True, retval=True) event.listen(desc, "bulk_replace", bulk_set, raw=True) event.listen(desc, "set", set_, raw=True, retval=True) if include_removes: event.listen(desc, "remove", remove, raw=True, retval=True) def polymorphic_union( table_map, typecolname, aliasname="p_union", cast_nulls=True ): """Create a ``UNION`` statement used by a polymorphic mapper. See :ref:`concrete_inheritance` for an example of how this is used. :param table_map: mapping of polymorphic identities to :class:`_schema.Table` objects. :param typecolname: string name of a "discriminator" column, which will be derived from the query, producing the polymorphic identity for each row. If ``None``, no polymorphic discriminator is generated. :param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()` construct generated. :param cast_nulls: if True, non-existent columns, which are represented as labeled NULLs, will be passed into CAST. This is a legacy behavior that is problematic on some backends such as Oracle - in which case it can be set to False. 
""" colnames = util.OrderedSet() colnamemaps = {} types = {} for key in table_map: table = table_map[key] table = coercions.expect( roles.StrictFromClauseRole, table, allow_select=True ) table_map[key] = table m = {} for c in table.c: if c.key == typecolname: raise sa_exc.InvalidRequestError( "Polymorphic union can't use '%s' as the discriminator " "column due to mapped column %r; please apply the " "'typecolname' " "argument; this is available on " "ConcreteBase as '_concrete_discriminator_name'" % (typecolname, c) ) colnames.add(c.key) m[c.key] = c types[c.key] = c.type colnamemaps[table] = m def col(name, table): try: return colnamemaps[table][name] except KeyError: if cast_nulls: return sql.cast(sql.null(), types[name]).label(name) else: return sql.type_coerce(sql.null(), types[name]).label(name) result = [] for type_, table in table_map.items(): if typecolname is not None: result.append( sql.select( *( [col(name, table) for name in colnames] + [ sql.literal_column( sql_util._quote_ddl_expr(type_) ).label(typecolname) ] ) ).select_from(table) ) else: result.append( sql.select( *[col(name, table) for name in colnames] ).select_from(table) ) return sql.union_all(*result).alias(aliasname) def identity_key(*args, **kwargs): r"""Generate "identity key" tuples, as are used as keys in the :attr:`.Session.identity_map` dictionary. This function has several call styles: * ``identity_key(class, ident, identity_token=token)`` This form receives a mapped class and a primary key scalar or tuple as an argument. E.g.:: >>> identity_key(MyClass, (1, 2)) (<class '__main__.MyClass'>, (1, 2), None) :param class: mapped class (must be a positional argument) :param ident: primary key, may be a scalar or tuple argument. :param identity_token: optional identity token .. versionadded:: 1.2 added identity_token * ``identity_key(instance=instance)`` This form will produce the identity key for a given instance. The instance need not be persistent, only that its primary key attributes are populated (else the key will contain ``None`` for those missing values). E.g.:: >>> instance = MyClass(1, 2) >>> identity_key(instance=instance) (<class '__main__.MyClass'>, (1, 2), None) In this form, the given instance is ultimately run though :meth:`_orm.Mapper.identity_key_from_instance`, which will have the effect of performing a database check for the corresponding row if the object is expired. :param instance: object instance (must be given as a keyword arg) * ``identity_key(class, row=row, identity_token=token)`` This form is similar to the class/tuple form, except is passed a database result row as a :class:`.Row` object. E.g.:: >>> row = engine.execute(\ text("select * from table where a=1 and b=2")\ ).first() >>> identity_key(MyClass, row=row) (<class '__main__.MyClass'>, (1, 2), None) :param class: mapped class (must be a positional argument) :param row: :class:`.Row` row returned by a :class:`_engine.CursorResult` (must be given as a keyword arg) :param identity_token: optional identity token .. 
versionadded:: 1.2 added identity_token """ if args: row = None largs = len(args) if largs == 1: class_ = args[0] try: row = kwargs.pop("row") except KeyError: ident = kwargs.pop("ident") elif largs in (2, 3): class_, ident = args else: raise sa_exc.ArgumentError( "expected up to three positional arguments, " "got %s" % largs ) identity_token = kwargs.pop("identity_token", None) if kwargs: raise sa_exc.ArgumentError( "unknown keyword arguments: %s" % ", ".join(kwargs) ) mapper = class_mapper(class_) if row is None: return mapper.identity_key_from_primary_key( util.to_list(ident), identity_token=identity_token ) else: return mapper.identity_key_from_row( row, identity_token=identity_token ) else: instance = kwargs.pop("instance") if kwargs: raise sa_exc.ArgumentError( "unknown keyword arguments: %s" % ", ".join(kwargs.keys) ) mapper = object_mapper(instance) return mapper.identity_key_from_instance(instance) class ORMAdapter(sql_util.ColumnAdapter): """ColumnAdapter subclass which excludes adaptation of entities from non-matching mappers. """ def __init__( self, entity, equivalents=None, adapt_required=False, allow_label_resolve=True, anonymize_labels=False, ): info = inspection.inspect(entity) self.mapper = info.mapper selectable = info.selectable is_aliased_class = info.is_aliased_class if is_aliased_class: self.aliased_class = entity else: self.aliased_class = None sql_util.ColumnAdapter.__init__( self, selectable, equivalents, adapt_required=adapt_required, allow_label_resolve=allow_label_resolve, anonymize_labels=anonymize_labels, include_fn=self._include_fn, ) def _include_fn(self, elem): entity = elem._annotations.get("parentmapper", None) return not entity or entity.isa(self.mapper) class AliasedClass(object): r"""Represents an "aliased" form of a mapped class for usage with Query. The ORM equivalent of a :func:`~sqlalchemy.sql.expression.alias` construct, this object mimics the mapped class using a ``__getattr__`` scheme and maintains a reference to a real :class:`~sqlalchemy.sql.expression.Alias` object. A primary purpose of :class:`.AliasedClass` is to serve as an alternate within a SQL statement generated by the ORM, such that an existing mapped entity can be used in multiple contexts. A simple example:: # find all pairs of users with the same name user_alias = aliased(User) session.query(User, user_alias).\ join((user_alias, User.id > user_alias.id)).\ filter(User.name == user_alias.name) :class:`.AliasedClass` is also capable of mapping an existing mapped class to an entirely new selectable, provided this selectable is column- compatible with the existing mapped selectable, and it can also be configured in a mapping as the target of a :func:`_orm.relationship`. See the links below for examples. The :class:`.AliasedClass` object is constructed typically using the :func:`_orm.aliased` function. It also is produced with additional configuration when using the :func:`_orm.with_polymorphic` function. The resulting object is an instance of :class:`.AliasedClass`. This object implements an attribute scheme which produces the same attribute and method interface as the original mapped class, allowing :class:`.AliasedClass` to be compatible with any attribute technique which works on the original class, including hybrid attributes (see :ref:`hybrids_toplevel`). 
The :class:`.AliasedClass` can be inspected for its underlying :class:`_orm.Mapper`, aliased selectable, and other information using :func:`_sa.inspect`:: from sqlalchemy import inspect my_alias = aliased(MyClass) insp = inspect(my_alias) The resulting inspection object is an instance of :class:`.AliasedInsp`. .. seealso:: :func:`.aliased` :func:`.with_polymorphic` :ref:`relationship_aliased_class` :ref:`relationship_to_window_function` """ def __init__( self, mapped_class_or_ac, alias=None, name=None, flat=False, adapt_on_names=False, # TODO: None for default here? with_polymorphic_mappers=(), with_polymorphic_discriminator=None, base_alias=None, use_mapper_path=False, represents_outer_join=False, ): insp = inspection.inspect(mapped_class_or_ac) mapper = insp.mapper if alias is None: alias = mapper._with_polymorphic_selectable._anonymous_fromclause( name=name, flat=flat, ) self._aliased_insp = AliasedInsp( self, insp, alias, name, with_polymorphic_mappers if with_polymorphic_mappers else mapper.with_polymorphic_mappers, with_polymorphic_discriminator if with_polymorphic_discriminator is not None else mapper.polymorphic_on, base_alias, use_mapper_path, adapt_on_names, represents_outer_join, ) self.__name__ = "AliasedClass_%s" % mapper.class_.__name__ @classmethod def _reconstitute_from_aliased_insp(cls, aliased_insp): obj = cls.__new__(cls) obj.__name__ = "AliasedClass_%s" % aliased_insp.mapper.class_.__name__ obj._aliased_insp = aliased_insp if aliased_insp._is_with_polymorphic: for sub_aliased_insp in aliased_insp._with_polymorphic_entities: if sub_aliased_insp is not aliased_insp: ent = AliasedClass._reconstitute_from_aliased_insp( sub_aliased_insp ) setattr(obj, sub_aliased_insp.class_.__name__, ent) return obj def __getattr__(self, key): try: _aliased_insp = self.__dict__["_aliased_insp"] except KeyError: raise AttributeError() else: target = _aliased_insp._target # maintain all getattr mechanics attr = getattr(target, key) # attribute is a method, that will be invoked against a # "self"; so just return a new method with the same function and # new self if hasattr(attr, "__call__") and hasattr(attr, "__self__"): return types.MethodType(attr.__func__, self) # attribute is a descriptor, that will be invoked against a # "self"; so invoke the descriptor against this self if hasattr(attr, "__get__"): attr = attr.__get__(None, self) # attributes within the QueryableAttribute system will want this # to be invoked so the object can be adapted if hasattr(attr, "adapt_to_entity"): attr = attr.adapt_to_entity(_aliased_insp) setattr(self, key, attr) return attr def _get_from_serialized(self, key, mapped_class, aliased_insp): # this method is only used in terms of the # sqlalchemy.ext.serializer extension attr = getattr(mapped_class, key) if hasattr(attr, "__call__") and hasattr(attr, "__self__"): return types.MethodType(attr.__func__, self) # attribute is a descriptor, that will be invoked against a # "self"; so invoke the descriptor against this self if hasattr(attr, "__get__"): attr = attr.__get__(None, self) # attributes within the QueryableAttribute system will want this # to be invoked so the object can be adapted if hasattr(attr, "adapt_to_entity"): aliased_insp._weak_entity = weakref.ref(self) attr = attr.adapt_to_entity(aliased_insp) setattr(self, key, attr) return attr def __repr__(self): return "<AliasedClass at 0x%x; %s>" % ( id(self), self._aliased_insp._target.__name__, ) def __str__(self): return str(self._aliased_insp) class AliasedInsp( ORMEntityColumnsClauseRole, 
ORMFromClauseRole, sql_base.MemoizedHasCacheKey, InspectionAttr, ): """Provide an inspection interface for an :class:`.AliasedClass` object. The :class:`.AliasedInsp` object is returned given an :class:`.AliasedClass` using the :func:`_sa.inspect` function:: from sqlalchemy import inspect from sqlalchemy.orm import aliased my_alias = aliased(MyMappedClass) insp = inspect(my_alias) Attributes on :class:`.AliasedInsp` include: * ``entity`` - the :class:`.AliasedClass` represented. * ``mapper`` - the :class:`_orm.Mapper` mapping the underlying class. * ``selectable`` - the :class:`_expression.Alias` construct which ultimately represents an aliased :class:`_schema.Table` or :class:`_expression.Select` construct. * ``name`` - the name of the alias. Also is used as the attribute name when returned in a result tuple from :class:`_query.Query`. * ``with_polymorphic_mappers`` - collection of :class:`_orm.Mapper` objects indicating all those mappers expressed in the select construct for the :class:`.AliasedClass`. * ``polymorphic_on`` - an alternate column or SQL expression which will be used as the "discriminator" for a polymorphic load. .. seealso:: :ref:`inspection_toplevel` """ def __init__( self, entity, inspected, selectable, name, with_polymorphic_mappers, polymorphic_on, _base_alias, _use_mapper_path, adapt_on_names, represents_outer_join, ): mapped_class_or_ac = inspected.entity mapper = inspected.mapper self._weak_entity = weakref.ref(entity) self.mapper = mapper self.selectable = ( self.persist_selectable ) = self.local_table = selectable self.name = name self.polymorphic_on = polymorphic_on self._base_alias = weakref.ref(_base_alias or self) self._use_mapper_path = _use_mapper_path self.represents_outer_join = represents_outer_join if with_polymorphic_mappers: self._is_with_polymorphic = True self.with_polymorphic_mappers = with_polymorphic_mappers self._with_polymorphic_entities = [] for poly in self.with_polymorphic_mappers: if poly is not mapper: ent = AliasedClass( poly.class_, selectable, base_alias=self, adapt_on_names=adapt_on_names, use_mapper_path=_use_mapper_path, ) setattr(self.entity, poly.class_.__name__, ent) self._with_polymorphic_entities.append(ent._aliased_insp) else: self._is_with_polymorphic = False self.with_polymorphic_mappers = [mapper] self._adapter = sql_util.ColumnAdapter( selectable, equivalents=mapper._equivalent_columns, adapt_on_names=adapt_on_names, anonymize_labels=True, # make sure the adapter doesn't try to grab other tables that # are not even the thing we are mapping, such as embedded # selectables in subqueries or CTEs. See issue #6060 adapt_from_selectables=[ m.selectable for m in self.with_polymorphic_mappers ], ) if inspected.is_aliased_class: self._adapter = inspected._adapter.wrap(self._adapter) self._adapt_on_names = adapt_on_names self._target = mapped_class_or_ac # self._target = mapper.class_ # mapped_class_or_ac @property def entity(self): # to eliminate reference cycles, the AliasedClass is held weakly. # this produces some situations where the AliasedClass gets lost, # particularly when one is created internally and only the AliasedInsp # is passed around. # to work around this case, we just generate a new one when we need # it, as it is a simple class with very little initial state on it. 
ent = self._weak_entity() if ent is None: ent = AliasedClass._reconstitute_from_aliased_insp(self) self._weak_entity = weakref.ref(ent) return ent is_aliased_class = True "always returns True" @util.memoized_instancemethod def __clause_element__(self): return self.selectable._annotate( { "parentmapper": self.mapper, "parententity": self, "entity_namespace": self, } )._set_propagate_attrs( {"compile_state_plugin": "orm", "plugin_subject": self} ) @property def entity_namespace(self): return self.entity _cache_key_traversal = [ ("name", visitors.ExtendedInternalTraversal.dp_string), ("_adapt_on_names", visitors.ExtendedInternalTraversal.dp_boolean), ("selectable", visitors.ExtendedInternalTraversal.dp_clauseelement), ] @property def class_(self): """Return the mapped class ultimately represented by this :class:`.AliasedInsp`.""" return self.mapper.class_ @property def _path_registry(self): if self._use_mapper_path: return self.mapper._path_registry else: return PathRegistry.per_mapper(self) def __getstate__(self): return { "entity": self.entity, "mapper": self.mapper, "alias": self.selectable, "name": self.name, "adapt_on_names": self._adapt_on_names, "with_polymorphic_mappers": self.with_polymorphic_mappers, "with_polymorphic_discriminator": self.polymorphic_on, "base_alias": self._base_alias(), "use_mapper_path": self._use_mapper_path, "represents_outer_join": self.represents_outer_join, } def __setstate__(self, state): self.__init__( state["entity"], state["mapper"], state["alias"], state["name"], state["with_polymorphic_mappers"], state["with_polymorphic_discriminator"], state["base_alias"], state["use_mapper_path"], state["adapt_on_names"], state["represents_outer_join"], ) def _adapt_element(self, elem, key=None): d = { "parententity": self, "parentmapper": self.mapper, } if key: d["proxy_key"] = key return ( self._adapter.traverse(elem) ._annotate(d) ._set_propagate_attrs( {"compile_state_plugin": "orm", "plugin_subject": self} ) ) def _entity_for_mapper(self, mapper): self_poly = self.with_polymorphic_mappers if mapper in self_poly: if mapper is self.mapper: return self else: return getattr( self.entity, mapper.class_.__name__ )._aliased_insp elif mapper.isa(self.mapper): return self else: assert False, "mapper %s doesn't correspond to %s" % (mapper, self) @util.memoized_property def _get_clause(self): onclause, replacemap = self.mapper._get_clause return ( self._adapter.traverse(onclause), { self._adapter.traverse(col): param for col, param in replacemap.items() }, ) @util.memoized_property def _memoized_values(self): return {} @util.memoized_property def _all_column_expressions(self): if self._is_with_polymorphic: cols_plus_keys = self.mapper._columns_plus_keys( [ent.mapper for ent in self._with_polymorphic_entities] ) else: cols_plus_keys = self.mapper._columns_plus_keys() cols_plus_keys = [ (key, self._adapt_element(col)) for key, col in cols_plus_keys ] return ColumnCollection(cols_plus_keys) def _memo(self, key, callable_, *args, **kw): if key in self._memoized_values: return self._memoized_values[key] else: self._memoized_values[key] = value = callable_(*args, **kw) return value def __repr__(self): if self.with_polymorphic_mappers: with_poly = "(%s)" % ", ".join( mp.class_.__name__ for mp in self.with_polymorphic_mappers ) else: with_poly = "" return "<AliasedInsp at 0x%x; %s%s>" % ( id(self), self.class_.__name__, with_poly, ) def __str__(self): if self._is_with_polymorphic: return "with_polymorphic(%s, [%s])" % ( self._target.__name__, ", ".join( mp.class_.__name__ for mp in 
self.with_polymorphic_mappers if mp is not self.mapper ), ) else: return "aliased(%s)" % (self._target.__name__,) class _WrapUserEntity(object): """A wrapper used within the loader_criteria lambda caller so that we can bypass declared_attr descriptors on unmapped mixins, which normally emit a warning for such use. might also be useful for other per-lambda instrumentations should the need arise. """ def __init__(self, subject): self.subject = subject @util.preload_module("sqlalchemy.orm.decl_api") def __getattribute__(self, name): decl_api = util.preloaded.orm.decl_api subject = object.__getattribute__(self, "subject") if name in subject.__dict__ and isinstance( subject.__dict__[name], decl_api.declared_attr ): return subject.__dict__[name].fget(subject) else: return getattr(subject, name) class LoaderCriteriaOption(CriteriaOption): """Add additional WHERE criteria to the load for all occurrences of a particular entity. :class:`_orm.LoaderCriteriaOption` is invoked using the :func:`_orm.with_loader_criteria` function; see that function for details. .. versionadded:: 1.4 """ _traverse_internals = [ ("root_entity", visitors.ExtendedInternalTraversal.dp_plain_obj), ("entity", visitors.ExtendedInternalTraversal.dp_has_cache_key), ("where_criteria", visitors.InternalTraversal.dp_clauseelement), ("include_aliases", visitors.InternalTraversal.dp_boolean), ("propagate_to_loaders", visitors.InternalTraversal.dp_boolean), ] def __init__( self, entity_or_base, where_criteria, loader_only=False, include_aliases=False, propagate_to_loaders=True, track_closure_variables=True, ): """Add additional WHERE criteria to the load for all occurrences of a particular entity. .. versionadded:: 1.4 The :func:`_orm.with_loader_criteria` option is intended to add limiting criteria to a particular kind of entity in a query, **globally**, meaning it will apply to the entity as it appears in the SELECT query as well as within any subqueries, join conditions, and relationship loads, including both eager and lazy loaders, without the need for it to be specified in any particular part of the query. The rendering logic uses the same system used by single table inheritance to ensure a certain discriminator is applied to a table. E.g., using :term:`2.0-style` queries, we can limit the way the ``User.addresses`` collection is loaded, regardless of the kind of loading used:: from sqlalchemy.orm import with_loader_criteria stmt = select(User).options( selectinload(User.addresses), with_loader_criteria(Address, Address.email_address != 'foo')) ) Above, the "selectinload" for ``User.addresses`` will apply the given filtering criteria to the WHERE clause. Another example, where the filtering will be applied to the ON clause of the join, in this example using :term:`1.x style` queries:: q = session.query(User).outerjoin(User.addresses).options( with_loader_criteria(Address, Address.email_address != 'foo')) ) The primary purpose of :func:`_orm.with_loader_criteria` is to use it in the :meth:`_orm.SessionEvents.do_orm_execute` event handler to ensure that all occurrences of a particular entity are filtered in a certain way, such as filtering for access control roles. It also can be used to apply criteria to relationship loads. 
In the example below, we can apply a certain set of rules to all queries emitted by a particular :class:`_orm.Session`:: session = Session(bind=engine) @event.listens_for("do_orm_execute", session) def _add_filtering_criteria(execute_state): if ( execute_state.is_select and not execute_state.is_column_load and not execute_state.is_relationship_load ): execute_state.statement = execute_state.statement.options( with_loader_criteria( SecurityRole, lambda cls: cls.role.in_(['some_role']), include_aliases=True ) ) In the above example, the :meth:`_orm.SessionEvents.do_orm_execute` event will intercept all queries emitted using the :class:`_orm.Session`. For those queries which are SELECT statements and are not attribute or relationship loads a custom :func:`_orm.with_loader_criteria` option is added to the query. The :func:`_orm.with_loader_criteria` option will be used in the given statement and will also be automatically propagated to all relationship loads that descend from this query. The criteria argument given is a ``lambda`` that accepts a ``cls`` argument. The given class will expand to include all mapped subclass and need not itself be a mapped class. .. tip:: When using :func:`_orm.with_loader_criteria` option in conjunction with the :func:`_orm.contains_eager` loader option, it's important to note that :func:`_orm.with_loader_criteria` only affects the part of the query that determines what SQL is rendered in terms of the WHERE and FROM clauses. The :func:`_orm.contains_eager` option does not affect the rendering of the SELECT statement outside of the columns clause, so does not have any interaction with the :func:`_orm.with_loader_criteria` option. However, the way things "work" is that :func:`_orm.contains_eager` is meant to be used with a query that is already selecting from the additional entities in some way, where :func:`_orm.with_loader_criteria` can apply it's additional criteria. In the example below, assuming a mapping relationship as ``A -> A.bs -> B``, the given :func:`_orm.with_loader_criteria` option will affect the way in which the JOIN is rendered:: stmt = select(A).join(A.bs).options( contains_eager(A.bs), with_loader_criteria(B, B.flag == 1) ) Above, the given :func:`_orm.with_loader_criteria` option will affect the ON clause of the JOIN that is specified by ``.join(A.bs)``, so is applied as expected. The :func:`_orm.contains_eager` option has the effect that columns from ``B`` are added to the columns clause:: SELECT b.id, b.a_id, b.data, b.flag, a.id AS id_1, a.data AS data_1 FROM a JOIN b ON a.id = b.a_id AND b.flag = :flag_1 The use of the :func:`_orm.contains_eager` option within the above statement has no effect on the behavior of the :func:`_orm.with_loader_criteria` option. If the :func:`_orm.contains_eager` option were omitted, the SQL would be the same as regards the FROM and WHERE clauses, where :func:`_orm.with_loader_criteria` continues to add its criteria to the ON clause of the JOIN. The addition of :func:`_orm.contains_eager` only affects the columns clause, in that additional columns against ``b`` are added which are then consumed by the ORM to produce ``B`` instances. .. warning:: The use of a lambda inside of the call to :func:`_orm.with_loader_criteria` is only invoked **once per unique class**. Custom functions should not be invoked within this lambda. See :ref:`engine_lambda_caching` for an overview of the "lambda SQL" feature, which is for advanced use only. 
:param entity_or_base: a mapped class, or a class that is a super class of a particular set of mapped classes, to which the rule will apply. :param where_criteria: a Core SQL expression that applies limiting criteria. This may also be a "lambda:" or Python function that accepts a target class as an argument, when the given class is a base with many different mapped subclasses. :param include_aliases: if True, apply the rule to :func:`_orm.aliased` constructs as well. :param propagate_to_loaders: defaults to True, apply to relationship loaders such as lazy loaders. .. seealso:: :ref:`examples_session_orm_events` - includes examples of using :func:`_orm.with_loader_criteria`. :ref:`do_orm_execute_global_criteria` - basic example on how to combine :func:`_orm.with_loader_criteria` with the :meth:`_orm.SessionEvents.do_orm_execute` event. :param track_closure_variables: when False, closure variables inside of a lambda expression will not be used as part of any cache key. This allows more complex expressions to be used inside of a lambda expression but requires that the lambda ensures it returns the identical SQL every time given a particular class. .. versionadded:: 1.4.0b2 """ entity = inspection.inspect(entity_or_base, False) if entity is None: self.root_entity = entity_or_base self.entity = None else: self.root_entity = None self.entity = entity if callable(where_criteria): self.deferred_where_criteria = True self.where_criteria = lambdas.DeferredLambdaElement( where_criteria, roles.WhereHavingRole, lambda_args=( _WrapUserEntity( self.root_entity if self.root_entity is not None else self.entity.entity, ), ), opts=lambdas.LambdaOptions( track_closure_variables=track_closure_variables ), ) else: self.deferred_where_criteria = False self.where_criteria = coercions.expect( roles.WhereHavingRole, where_criteria ) self.include_aliases = include_aliases self.propagate_to_loaders = propagate_to_loaders def _all_mappers(self): if self.entity: for ent in self.entity.mapper.self_and_descendants: yield ent else: stack = list(self.root_entity.__subclasses__()) while stack: subclass = stack.pop(0) ent = inspection.inspect(subclass, raiseerr=False) if ent: for mp in ent.mapper.self_and_descendants: yield mp else: stack.extend(subclass.__subclasses__()) def _resolve_where_criteria(self, ext_info): if self.deferred_where_criteria: return self.where_criteria._resolve_with_args(ext_info.entity) else: return self.where_criteria def process_compile_state_replaced_entities( self, compile_state, mapper_entities ): return self.process_compile_state(compile_state) def process_compile_state(self, compile_state): """Apply a modification to a given :class:`.CompileState`.""" # if options to limit the criteria to immediate query only, # use compile_state.attributes instead if compile_state.compile_options._with_polymorphic_adapt_map: util.warn( "The with_loader_criteria() function may not work " "correctly with the legacy Query.with_polymorphic() feature. " "Please migrate code to use the with_polymorphic() standalone " "function before using with_loader_criteria()." 
) if not compile_state.compile_options._for_refresh_state: self.get_global_criteria(compile_state.global_attributes) def get_global_criteria(self, attributes): for mp in self._all_mappers(): load_criteria = attributes.setdefault( ("additional_entity_criteria", mp), [] ) load_criteria.append(self) inspection._inspects(AliasedClass)(lambda target: target._aliased_insp) inspection._inspects(AliasedInsp)(lambda target: target) def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False): """Produce an alias of the given element, usually an :class:`.AliasedClass` instance. E.g.:: my_alias = aliased(MyClass) session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id) The :func:`.aliased` function is used to create an ad-hoc mapping of a mapped class to a new selectable. By default, a selectable is generated from the normally mapped selectable (typically a :class:`_schema.Table` ) using the :meth:`_expression.FromClause.alias` method. However, :func:`.aliased` can also be used to link the class to a new :func:`_expression.select` statement. Also, the :func:`.with_polymorphic` function is a variant of :func:`.aliased` that is intended to specify a so-called "polymorphic selectable", that corresponds to the union of several joined-inheritance subclasses at once. For convenience, the :func:`.aliased` function also accepts plain :class:`_expression.FromClause` constructs, such as a :class:`_schema.Table` or :func:`_expression.select` construct. In those cases, the :meth:`_expression.FromClause.alias` method is called on the object and the new :class:`_expression.Alias` object returned. The returned :class:`_expression.Alias` is not ORM-mapped in this case. .. seealso:: :ref:`tutorial_orm_entity_aliases` - in the :ref:`unified_tutorial` :ref:`orm_queryguide_orm_aliases` - in the :ref:`queryguide_toplevel` :ref:`ormtutorial_aliases` - in the legacy :ref:`ormtutorial_toplevel` :param element: element to be aliased. Is normally a mapped class, but for convenience can also be a :class:`_expression.FromClause` element. :param alias: Optional selectable unit to map the element to. This is usually used to link the object to a subquery, and should be an aliased select construct as one would produce from the :meth:`_query.Query.subquery` method or the :meth:`_expression.Select.subquery` or :meth:`_expression.Select.alias` methods of the :func:`_expression.select` construct. :param name: optional string name to use for the alias, if not specified by the ``alias`` parameter. The name, among other things, forms the attribute name that will be accessible via tuples returned by a :class:`_query.Query` object. Not supported when creating aliases of :class:`_sql.Join` objects. :param flat: Boolean, will be passed through to the :meth:`_expression.FromClause.alias` call so that aliases of :class:`_expression.Join` objects will alias the individual tables inside the join, rather than creating a subquery. This is generally supported by all modern databases with regards to right-nested joins and generally produces more efficient queries. :param adapt_on_names: if True, more liberal "matching" will be used when mapping the mapped columns of the ORM entity to those of the given selectable - a name-based match will be performed if the given selectable doesn't otherwise have a column that corresponds to one on the entity. The use case for this is when associating an entity with some derived selectable such as one that uses aggregate functions:: class UnitPrice(Base): __tablename__ = 'unit_price' ... 
unit_id = Column(Integer) price = Column(Numeric) aggregated_unit_price = Session.query( func.sum(UnitPrice.price).label('price') ).group_by(UnitPrice.unit_id).subquery() aggregated_unit_price = aliased(UnitPrice, alias=aggregated_unit_price, adapt_on_names=True) Above, functions on ``aggregated_unit_price`` which refer to ``.price`` will return the ``func.sum(UnitPrice.price).label('price')`` column, as it is matched on the name "price". Ordinarily, the "price" function wouldn't have any "column correspondence" to the actual ``UnitPrice.price`` column as it is not a proxy of the original. """ if isinstance(element, expression.FromClause): if adapt_on_names: raise sa_exc.ArgumentError( "adapt_on_names only applies to ORM elements" ) if name: return element.alias(name=name, flat=flat) else: return coercions.expect( roles.AnonymizedFromClauseRole, element, flat=flat ) else: return AliasedClass( element, alias=alias, flat=flat, name=name, adapt_on_names=adapt_on_names, ) def with_polymorphic( base, classes, selectable=False, flat=False, polymorphic_on=None, aliased=False, innerjoin=False, _use_mapper_path=False, _existing_alias=None, ): """Produce an :class:`.AliasedClass` construct which specifies columns for descendant mappers of the given base. Using this method will ensure that each descendant mapper's tables are included in the FROM clause, and will allow filter() criterion to be used against those tables. The resulting instances will also have those columns already loaded so that no "post fetch" of those columns will be required. .. seealso:: :ref:`with_polymorphic` - full discussion of :func:`_orm.with_polymorphic`. :param base: Base class to be aliased. :param classes: a single class or mapper, or list of class/mappers, which inherit from the base class. Alternatively, it may also be the string ``'*'``, in which case all descending mapped classes will be added to the FROM clause. :param aliased: when True, the selectable will be aliased. For a JOIN, this means the JOIN will be SELECTed from inside of a subquery unless the :paramref:`_orm.with_polymorphic.flat` flag is set to True, which is recommended for simpler use cases. :param flat: Boolean, will be passed through to the :meth:`_expression.FromClause.alias` call so that aliases of :class:`_expression.Join` objects will alias the individual tables inside the join, rather than creating a subquery. This is generally supported by all modern databases with regards to right-nested joins and generally produces more efficient queries. Setting this flag is recommended as long as the resulting SQL is functional. :param selectable: a table or subquery that will be used in place of the generated FROM clause. This argument is required if any of the desired classes use concrete table inheritance, since SQLAlchemy currently cannot generate UNIONs among tables automatically. If used, the ``selectable`` argument must represent the full set of tables and columns mapped by every mapped class. Otherwise, the unaccounted mapped columns will result in their table being appended directly to the FROM clause which will usually lead to incorrect results. When left at its default value of ``False``, the polymorphic selectable assigned to the base mapper is used for selecting rows. However, it may also be passed as ``None``, which will bypass the configured polymorphic selectable and instead construct an ad-hoc selectable for the target classes given; for joined table inheritance this will be a join that includes all target mappers and their subclasses. 
:param polymorphic_on: a column to be used as the "discriminator" column for the given selectable. If not given, the polymorphic_on attribute of the base classes' mapper will be used, if any. This is useful for mappings that don't have polymorphic loading behavior by default. :param innerjoin: if True, an INNER JOIN will be used. This should only be specified if querying for one specific subtype only """ primary_mapper = _class_to_mapper(base) if selectable not in (None, False) and flat: raise sa_exc.ArgumentError( "the 'flat' and 'selectable' arguments cannot be passed " "simultaneously to with_polymorphic()" ) if _existing_alias: assert _existing_alias.mapper is primary_mapper classes = util.to_set(classes) new_classes = set( [mp.class_ for mp in _existing_alias.with_polymorphic_mappers] ) if classes == new_classes: return _existing_alias else: classes = classes.union(new_classes) mappers, selectable = primary_mapper._with_polymorphic_args( classes, selectable, innerjoin=innerjoin ) if aliased or flat: selectable = selectable._anonymous_fromclause(flat=flat) return AliasedClass( base, selectable, with_polymorphic_mappers=mappers, with_polymorphic_discriminator=polymorphic_on, use_mapper_path=_use_mapper_path, represents_outer_join=not innerjoin, ) @inspection._self_inspects class Bundle( ORMColumnsClauseRole, SupportsCloneAnnotations, sql_base.MemoizedHasCacheKey, InspectionAttr, ): """A grouping of SQL expressions that are returned by a :class:`.Query` under one namespace. The :class:`.Bundle` essentially allows nesting of the tuple-based results returned by a column-oriented :class:`_query.Query` object. It also is extensible via simple subclassing, where the primary capability to override is that of how the set of expressions should be returned, allowing post-processing as well as custom return types, without involving ORM identity-mapped classes. .. versionadded:: 0.9.0 .. seealso:: :ref:`bundles` """ single_entity = False """If True, queries for a single Bundle will be returned as a single entity, rather than an element within a keyed tuple.""" is_clause_element = False is_mapper = False is_aliased_class = False is_bundle = True _propagate_attrs = util.immutabledict() def __init__(self, name, *exprs, **kw): r"""Construct a new :class:`.Bundle`. e.g.:: bn = Bundle("mybundle", MyClass.x, MyClass.y) for row in session.query(bn).filter( bn.c.x == 5).filter(bn.c.y == 4): print(row.mybundle.x, row.mybundle.y) :param name: name of the bundle. :param \*exprs: columns or SQL expressions comprising the bundle. :param single_entity=False: if True, rows for this :class:`.Bundle` can be returned as a "single entity" outside of any enclosing tuple in the same manner as a mapped entity. 
""" self.name = self._label = name self.exprs = exprs = [ coercions.expect( roles.ColumnsClauseRole, expr, apply_propagate_attrs=self ) for expr in exprs ] self.c = self.columns = ColumnCollection( (getattr(col, "key", col._label), col) for col in [e._annotations.get("bundle", e) for e in exprs] ) self.single_entity = kw.pop("single_entity", self.single_entity) def _gen_cache_key(self, anon_map, bindparams): return (self.__class__, self.name, self.single_entity) + tuple( [expr._gen_cache_key(anon_map, bindparams) for expr in self.exprs] ) @property def mapper(self): return self.exprs[0]._annotations.get("parentmapper", None) @property def entity(self): return self.exprs[0]._annotations.get("parententity", None) @property def entity_namespace(self): return self.c columns = None """A namespace of SQL expressions referred to by this :class:`.Bundle`. e.g.:: bn = Bundle("mybundle", MyClass.x, MyClass.y) q = sess.query(bn).filter(bn.c.x == 5) Nesting of bundles is also supported:: b1 = Bundle("b1", Bundle('b2', MyClass.a, MyClass.b), Bundle('b3', MyClass.x, MyClass.y) ) q = sess.query(b1).filter( b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9) .. seealso:: :attr:`.Bundle.c` """ c = None """An alias for :attr:`.Bundle.columns`.""" def _clone(self): cloned = self.__class__.__new__(self.__class__) cloned.__dict__.update(self.__dict__) return cloned def __clause_element__(self): # ensure existing entity_namespace remains annotations = {"bundle": self, "entity_namespace": self} annotations.update(self._annotations) plugin_subject = self.exprs[0]._propagate_attrs.get( "plugin_subject", self.entity ) return ( expression.ClauseList( _literal_as_text_role=roles.ColumnsClauseRole, group=False, *[e._annotations.get("bundle", e) for e in self.exprs] ) ._annotate(annotations) ._set_propagate_attrs( # the Bundle *must* use the orm plugin no matter what. the # subject can be None but it's much better if it's not. { "compile_state_plugin": "orm", "plugin_subject": plugin_subject, } ) ) @property def clauses(self): return self.__clause_element__().clauses def label(self, name): """Provide a copy of this :class:`.Bundle` passing a new label.""" cloned = self._clone() cloned.name = name return cloned def create_row_processor(self, query, procs, labels): """Produce the "row processing" function for this :class:`.Bundle`. May be overridden by subclasses. .. seealso:: :ref:`bundles` - includes an example of subclassing. """ keyed_tuple = result_tuple(labels, [() for l in labels]) def proc(row): return keyed_tuple([proc(row) for proc in procs]) return proc def _orm_annotate(element, exclude=None): """Deep copy the given ClauseElement, annotating each element with the "_orm_adapt" flag. Elements within the exclude collection will be cloned but not annotated. """ return sql_util._deep_annotate(element, {"_orm_adapt": True}, exclude) def _orm_deannotate(element): """Remove annotations that link a column to a particular mapping. Note this doesn't affect "remote" and "foreign" annotations passed by the :func:`_orm.foreign` and :func:`_orm.remote` annotators. 
""" return sql_util._deep_deannotate( element, values=("_orm_adapt", "parententity") ) def _orm_full_deannotate(element): return sql_util._deep_deannotate(element) class _ORMJoin(expression.Join): """Extend Join to support ORM constructs as input.""" __visit_name__ = expression.Join.__visit_name__ inherit_cache = True def __init__( self, left, right, onclause=None, isouter=False, full=False, _left_memo=None, _right_memo=None, _extra_criteria=(), ): left_info = inspection.inspect(left) right_info = inspection.inspect(right) adapt_to = right_info.selectable # used by joined eager loader self._left_memo = _left_memo self._right_memo = _right_memo # legacy, for string attr name ON clause. if that's removed # then the "_joined_from_info" concept can go left_orm_info = getattr(left, "_joined_from_info", left_info) self._joined_from_info = right_info if isinstance(onclause, util.string_types): onclause = getattr(left_orm_info.entity, onclause) # #### if isinstance(onclause, attributes.QueryableAttribute): on_selectable = onclause.comparator._source_selectable() prop = onclause.property _extra_criteria += onclause._extra_criteria elif isinstance(onclause, MapperProperty): # used internally by joined eager loader...possibly not ideal prop = onclause on_selectable = prop.parent.selectable else: prop = None if prop: left_selectable = left_info.selectable if sql_util.clause_is_present(on_selectable, left_selectable): adapt_from = on_selectable else: adapt_from = left_selectable ( pj, sj, source, dest, secondary, target_adapter, ) = prop._create_joins( source_selectable=adapt_from, dest_selectable=adapt_to, source_polymorphic=True, of_type_entity=right_info, alias_secondary=True, extra_criteria=_extra_criteria, ) if sj is not None: if isouter: # note this is an inner join from secondary->right right = sql.join(secondary, right, sj) onclause = pj else: left = sql.join(left, secondary, pj, isouter) onclause = sj else: onclause = pj self._target_adapter = target_adapter augment_onclause = onclause is None and _extra_criteria expression.Join.__init__(self, left, right, onclause, isouter, full) if augment_onclause: self.onclause &= sql.and_(*_extra_criteria) if ( not prop and getattr(right_info, "mapper", None) and right_info.mapper.single ): # if single inheritance target and we are using a manual # or implicit ON clause, augment it the same way we'd augment the # WHERE. single_crit = right_info.mapper._single_table_criterion if single_crit is not None: if right_info.is_aliased_class: single_crit = right_info._adapter.traverse(single_crit) self.onclause = self.onclause & single_crit def _splice_into_center(self, other): """Splice a join into the center. Given join(a, b) and join(b, c), return join(a, b).join(c) """ leftmost = other while isinstance(leftmost, sql.Join): leftmost = leftmost.left assert self.right is leftmost left = _ORMJoin( self.left, other.left, self.onclause, isouter=self.isouter, _left_memo=self._left_memo, _right_memo=other._left_memo, ) return _ORMJoin( left, other.right, other.onclause, isouter=other.isouter, _right_memo=other._right_memo, ) def join( self, right, onclause=None, isouter=False, full=False, join_to_left=None, ): return _ORMJoin(self, right, onclause, full=full, isouter=isouter) def outerjoin(self, right, onclause=None, full=False, join_to_left=None): return _ORMJoin(self, right, onclause, isouter=True, full=full) def join( left, right, onclause=None, isouter=False, full=False, join_to_left=None ): r"""Produce an inner join between left and right clauses. 
:func:`_orm.join` is an extension to the core join interface provided by :func:`_expression.join()`, where the left and right selectables may be not only core selectable objects such as :class:`_schema.Table`, but also mapped classes or :class:`.AliasedClass` instances. The "on" clause can be a SQL expression, or an attribute or string name referencing a configured :func:`_orm.relationship`. :func:`_orm.join` is not commonly needed in modern usage, as its functionality is encapsulated within that of the :meth:`_query.Query.join` method, which features a significant amount of automation beyond :func:`_orm.join` by itself. Explicit usage of :func:`_orm.join` with :class:`_query.Query` involves usage of the :meth:`_query.Query.select_from` method, as in:: from sqlalchemy.orm import join session.query(User).\ select_from(join(User, Address, User.addresses)).\ filter(Address.email_address=='[email protected]') In modern SQLAlchemy the above join can be written more succinctly as:: session.query(User).\ join(User.addresses).\ filter(Address.email_address=='[email protected]') See :meth:`_query.Query.join` for information on modern usage of ORM level joins. .. deprecated:: 0.8 the ``join_to_left`` parameter is deprecated, and will be removed in a future release. The parameter has no effect. """ return _ORMJoin(left, right, onclause, isouter, full) def outerjoin(left, right, onclause=None, full=False, join_to_left=None): """Produce a left outer join between left and right clauses. This is the "outer join" version of the :func:`_orm.join` function, featuring the same behavior except that an OUTER JOIN is generated. See that function's documentation for other usage details. """ return _ORMJoin(left, right, onclause, True, full) def with_parent(instance, prop, from_entity=None): """Create filtering criterion that relates this query's primary entity to the given related instance, using established :func:`_orm.relationship()` configuration. E.g.:: stmt = select(Address).where(with_parent(some_user, Address.user)) The SQL rendered is the same as that rendered when a lazy loader would fire off from the given parent on that attribute, meaning that the appropriate state is taken from the parent object in Python without the need to render joins to the parent table in the rendered statement. The given property may also make use of :meth:`_orm.PropComparator.of_type` to indicate the left side of the criteria:: a1 = aliased(Address) a2 = aliased(Address) stmt = select(a1, a2).where( with_parent(u1, User.addresses.of_type(a2)) ) The above use is equivalent to using the :func:`_orm.with_parent.from_entity` argument:: a1 = aliased(Address) a2 = aliased(Address) stmt = select(a1, a2).where( with_parent(u1, User.addresses, from_entity=a2) ) :param instance: An instance which has some :func:`_orm.relationship`. :param property: String property name, or class-bound attribute, which indicates what relationship from the instance should be used to reconcile the parent/child relationship. .. deprecated:: 1.4 Using strings is deprecated and will be removed in SQLAlchemy 2.0. Please use the class-bound attribute directly. :param from_entity: Entity in which to consider as the left side. This defaults to the "zero" entity of the :class:`_query.Query` itself. .. versionadded:: 1.2 """ if isinstance(prop, util.string_types): util.warn_deprecated_20( "Using strings to indicate relationship names in the ORM " "with_parent() function is deprecated and will be removed " "SQLAlchemy 2.0. 
Please use the class-bound attribute directly." ) mapper = object_mapper(instance) prop = getattr(mapper.class_, prop).property elif isinstance(prop, attributes.QueryableAttribute): if prop._of_type: from_entity = prop._of_type prop = prop.property return prop._with_parent(instance, from_entity=from_entity) def has_identity(object_): """Return True if the given object has a database identity. This typically corresponds to the object being in either the persistent or detached state. .. seealso:: :func:`.was_deleted` """ state = attributes.instance_state(object_) return state.has_identity def was_deleted(object_): """Return True if the given object was deleted within a session flush. This is regardless of whether or not the object is persistent or detached. .. seealso:: :attr:`.InstanceState.was_deleted` """ state = attributes.instance_state(object_) return state.was_deleted def _entity_corresponds_to(given, entity): """determine if 'given' corresponds to 'entity', in terms of an entity passed to Query that would match the same entity being referred to elsewhere in the query. """ if entity.is_aliased_class: if given.is_aliased_class: if entity._base_alias() is given._base_alias(): return True return False elif given.is_aliased_class: if given._use_mapper_path: return entity in given.with_polymorphic_mappers else: return entity is given return entity.common_parent(given) def _entity_corresponds_to_use_path_impl(given, entity): """determine if 'given' corresponds to 'entity', in terms of a path of loader options where a mapped attribute is taken to be a member of a parent entity. e.g.:: someoption(A).someoption(A.b) # -> fn(A, A) -> True someoption(A).someoption(C.d) # -> fn(A, C) -> False a1 = aliased(A) someoption(a1).someoption(A.b) # -> fn(a1, A) -> False someoption(a1).someoption(a1.b) # -> fn(a1, a1) -> True wp = with_polymorphic(A, [A1, A2]) someoption(wp).someoption(A1.foo) # -> fn(wp, A1) -> False someoption(wp).someoption(wp.A1.foo) # -> fn(wp, wp.A1) -> True """ if given.is_aliased_class: return ( entity.is_aliased_class and not entity._use_mapper_path and (given is entity or given in entity._with_polymorphic_entities) ) elif not entity.is_aliased_class: return given.common_parent(entity.mapper) else: return ( entity._use_mapper_path and given in entity.with_polymorphic_mappers ) def _entity_isa(given, mapper): """determine if 'given' "is a" mapper, in terms of the given would load rows of type 'mapper'. """ if given.is_aliased_class: return mapper in given.with_polymorphic_mappers or given.mapper.isa( mapper ) elif given.with_polymorphic_mappers: return mapper in given.with_polymorphic_mappers else: return given.isa(mapper) def randomize_unitofwork(): """Use random-ordering sets within the unit of work in order to detect unit of work sorting issues. This is a utility function that can be used to help reproduce inconsistent unit of work sorting issues. For example, if two kinds of objects A and B are being inserted, and B has a foreign key reference to A - the A must be inserted first. However, if there is no relationship between A and B, the unit of work won't know to perform this sorting, and an operation may or may not fail, depending on how the ordering works out. Since Python sets and dictionaries have non-deterministic ordering, such an issue may occur on some runs and not on others, and in practice it tends to have a great dependence on the state of the interpreter. 
This leads to so-called "heisenbugs" where changing entirely irrelevant aspects of the test program still cause the failure behavior to change. By calling ``randomize_unitofwork()`` when a script first runs, the ordering of a key series of sets within the unit of work implementation are randomized, so that the script can be minimized down to the fundamental mapping and operation that's failing, while still reproducing the issue on at least some runs. This utility is also available when running the test suite via the ``--reversetop`` flag. """ from sqlalchemy.orm import unitofwork, session, mapper, dependency from sqlalchemy.util import topological from sqlalchemy.testing.util import RandomSet topological.set = ( unitofwork.set ) = session.set = mapper.set = dependency.set = RandomSet def _getitem(iterable_query, item, allow_negative): """calculate __getitem__ in terms of an iterable query object that also has a slice() method. """ def _no_negative_indexes(): if not allow_negative: raise IndexError( "negative indexes are not accepted by SQL " "index / slice operators" ) else: util.warn_deprecated_20( "Support for negative indexes for SQL index / slice operators " "will be " "removed in 2.0; these operators fetch the complete result " "and do not work efficiently." ) if isinstance(item, slice): start, stop, step = util.decode_slice(item) if ( isinstance(stop, int) and isinstance(start, int) and stop - start <= 0 ): return [] elif (isinstance(start, int) and start < 0) or ( isinstance(stop, int) and stop < 0 ): _no_negative_indexes() return list(iterable_query)[item] res = iterable_query.slice(start, stop) if step is not None: return list(res)[None : None : item.step] else: return list(res) else: if item == -1: _no_negative_indexes() return list(iterable_query)[-1] else: return list(iterable_query[item : item + 1])[0]
zzzeek/sqlalchemy
lib/sqlalchemy/orm/util.py
Python
mit
72,410
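The orm/util.py entry above documents aliased(), with_polymorphic() and Bundle at length in its docstrings. As a point of reference, a minimal usage sketch of the first two against SQLAlchemy 1.4 follows; the Employee/Manager mapping and the in-memory SQLite engine are illustrative assumptions, not code from the repository.

# Minimal sketch (assumed model and engine) of aliased() and with_polymorphic().
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, aliased, declarative_base, with_polymorphic

Base = declarative_base()

class Employee(Base):
    __tablename__ = 'employee'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    type = Column(String(20))
    __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'employee'}

class Manager(Employee):
    __tablename__ = 'manager'
    id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
    department = Column(String(50))
    __mapper_args__ = {'polymorphic_identity': 'manager'}

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    # aliased() gives the same mapped class a second identity in one query,
    # here for a self-comparison on the primary key.
    e2 = aliased(Employee)
    pairs = session.query(Employee, e2).filter(Employee.id > e2.id).all()

    # with_polymorphic() pulls the subclass table into the FROM clause so
    # Manager columns can be filtered without an explicit join.
    emp_poly = with_polymorphic(Employee, [Manager])
    managers = (
        session.query(emp_poly)
        .filter(emp_poly.Manager.department == 'engineering')
        .all()
    )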
# Common functions for Scons scripts...
import os


def FileListAppend(fileList, buildRelativeFolder, includeFolder, wantedExtension):
    fullFolder = os.path.join(buildRelativeFolder, includeFolder)
    for filename in os.listdir(fullFolder):
        if not filename.endswith("." + wantedExtension):
            continue

        fullFilePathName = os.path.join(buildRelativeFolder, includeFolder, filename)
        if not os.path.isfile(fullFilePathName):
            continue

        includeFilePathName = os.path.join(includeFolder, filename)
        if includeFilePathName.startswith(".."):
            includeFilePathName = "#" + includeFilePathName

        fileList.append(includeFilePathName)


def BuildName(isDebug, isUnicode, isShared):
    if int(isDebug):
        buildName = "Debug"
    else:
        buildName = "Retail"

    if int(isUnicode):
        buildName += "Unicode"
    if int(isShared):
        buildName += "Shared"

    return buildName
dava/dava.engine
Programs/ColladaConverter/Collada15/SconsCommon.py
Python
bsd-3-clause
855
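FileListAppend() and BuildName() above are plain helpers meant to be shared across SConscript files; a hypothetical caller is sketched below, where the import path, folder names and flag values are all assumptions made for illustration.

# Hypothetical SConscript fragment; import path and folders are assumptions.
from SconsCommon import FileListAppend, BuildName

sources = []
# Collect every .cpp file from the Collada15 folder relative to the build dir.
FileListAppend(sources, '.', 'Collada15', 'cpp')

# Numeric flags 1/0/1 compose the flavour name "DebugShared".
print(BuildName(isDebug=1, isUnicode=0, isShared=1))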
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:[email protected] # # This file is part of logilab-common. # # logilab-common is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free # Software Foundation, either version 2.1 of the License, or (at your option) any # later version. # # logilab-common is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License along # with logilab-common. If not, see <http://www.gnu.org/licenses/>. import tempfile import os from os.path import join, dirname, abspath from cStringIO import StringIO from sys import version_info from logilab.common.testlib import TestCase, unittest_main from logilab.common.optik_ext import OptionValueError from logilab.common.configuration import Configuration, \ OptionsManagerMixIn, OptionsProviderMixIn, Method, read_old_config DATA = join(dirname(abspath(__file__)), 'data') options = [('dothis', {'type':'yn', 'action': 'store', 'default': True, 'metavar': '<y or n>'}), ('value', {'type': 'string', 'metavar': '<string>', 'short': 'v'}), ('multiple', {'type': 'csv', 'default': ('yop', 'yep'), 'metavar': '<comma separated values>', 'help': 'you can also document the option'}), ('number', {'type': 'int', 'default':2, 'metavar':'<int>', 'help': 'boom'}), ('choice', {'type': 'choice', 'default':'yo', 'choices': ('yo', 'ye'), 'metavar':'<yo|ye>'}), ('multiple-choice', {'type': 'multiple_choice', 'default':('yo', 'ye'), 'choices': ('yo', 'ye', 'yu', 'yi', 'ya'), 'metavar':'<yo|ye>'}), ('named', {'type':'named', 'default':Method('get_named'), 'metavar': '<key=val>'}), ('diffgroup', {'type':'string', 'default':'pouet', 'metavar': '<key=val>', 'group': 'agroup'}), ] class MyConfiguration(Configuration): """test configuration""" def get_named(self): return {'key': 'val'} class ConfigurationTC(TestCase): def setUp(self): self.cfg = MyConfiguration(name='test', options=options, usage='Just do it ! 
(tm)') def test_default(self): cfg = self.cfg self.assertEqual(cfg['dothis'], True) self.assertEqual(cfg['value'], None) self.assertEqual(cfg['multiple'], ('yop', 'yep')) self.assertEqual(cfg['number'], 2) self.assertEqual(cfg['choice'], 'yo') self.assertEqual(cfg['multiple-choice'], ('yo', 'ye')) self.assertEqual(cfg['named'], {'key': 'val'}) def test_base(self): cfg = self.cfg cfg.set_option('number', '0') self.assertEqual(cfg['number'], 0) self.assertRaises(OptionValueError, cfg.set_option, 'number', 'youpi') self.assertRaises(OptionValueError, cfg.set_option, 'choice', 'youpi') self.assertRaises(OptionValueError, cfg.set_option, 'multiple-choice', ('yo', 'y', 'ya')) cfg.set_option('multiple-choice', 'yo, ya') self.assertEqual(cfg['multiple-choice'], ['yo', 'ya']) self.assertEqual(cfg.get('multiple-choice'), ['yo', 'ya']) self.assertEqual(cfg.get('whatever'), None) def test_load_command_line_configuration(self): cfg = self.cfg args = cfg.load_command_line_configuration(['--choice', 'ye', '--number', '4', '--multiple=1,2,3', '--dothis=n', 'other', 'arguments']) self.assertEqual(args, ['other', 'arguments']) self.assertEqual(cfg['dothis'], False) self.assertEqual(cfg['multiple'], ['1', '2', '3']) self.assertEqual(cfg['number'], 4) self.assertEqual(cfg['choice'], 'ye') self.assertEqual(cfg['value'], None) args = cfg.load_command_line_configuration(['-v', 'duh']) self.assertEqual(args, []) self.assertEqual(cfg['value'], 'duh') self.assertEqual(cfg['dothis'], False) self.assertEqual(cfg['multiple'], ['1', '2', '3']) self.assertEqual(cfg['number'], 4) self.assertEqual(cfg['choice'], 'ye') def test_load_configuration(self): cfg = self.cfg args = cfg.load_configuration(choice='ye', number='4', multiple='1,2,3', dothis='n', multiple_choice=('yo', 'ya')) self.assertEqual(cfg['dothis'], False) self.assertEqual(cfg['multiple'], ['1', '2', '3']) self.assertEqual(cfg['number'], 4) self.assertEqual(cfg['choice'], 'ye') self.assertEqual(cfg['value'], None) self.assertEqual(cfg['multiple-choice'], ('yo', 'ya')) def test_load_configuration_file_case_insensitive(self): file = tempfile.mktemp() stream = open(file, 'w') try: stream.write("""[Test] dothis=no #value= # you can also document the option multiple=yop,yepii # boom number=3 choice=yo multiple-choice=yo,ye named=key:val [agroup] diffgroup=zou """) stream.close() self.cfg.load_file_configuration(file) self.assertEqual(self.cfg['dothis'], False) self.assertEqual(self.cfg['value'], None) self.assertEqual(self.cfg['multiple'], ['yop', 'yepii']) self.assertEqual(self.cfg['diffgroup'], 'zou') finally: os.remove(file) def test_generate_config(self): stream = StringIO() self.cfg.generate_config(stream) self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST] dothis=yes #value= # you can also document the option multiple=yop,yep # boom number=2 choice=yo multiple-choice=yo,ye named=key:val [AGROUP] diffgroup=pouet""") def test_generate_config_with_space_string(self): self.cfg['value'] = ' ' stream = StringIO() self.cfg.generate_config(stream) self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST] dothis=yes value=' ' # you can also document the option multiple=yop,yep # boom number=2 choice=yo multiple-choice=yo,ye named=key:val [AGROUP] diffgroup=pouet""") def test_loopback(self): cfg = self.cfg f = tempfile.mktemp() stream = open(f, 'w') try: cfg.generate_config(stream) stream.close() new_cfg = MyConfiguration(name='testloop', options=options) new_cfg.load_file_configuration(f) self.assertEqual(cfg['dothis'], new_cfg['dothis']) 
self.assertEqual(cfg['multiple'], new_cfg['multiple']) self.assertEqual(cfg['number'], new_cfg['number']) self.assertEqual(cfg['choice'], new_cfg['choice']) self.assertEqual(cfg['value'], new_cfg['value']) self.assertEqual(cfg['multiple-choice'], new_cfg['multiple-choice']) finally: os.remove(f) def test_setitem(self): self.assertRaises(OptionValueError, self.cfg.__setitem__, 'multiple-choice', ('a', 'b')) self.cfg['multiple-choice'] = ('yi', 'ya') self.assertEqual(self.cfg['multiple-choice'], ('yi', 'ya')) def test_help(self): self.cfg.add_help_section('bonus', 'a nice additional help') help = self.cfg.help().strip() # at least in python 2.4.2 the output is: # ' -v <string>, --value=<string>' # it is not unlikely some optik/optparse versions do print -v<string> # so accept both help = help.replace(' -v <string>, ', ' -v<string>, ') USAGE = """Usage: Just do it ! (tm) Options: -h, --help show this help message and exit --dothis=<y or n> -v<string>, --value=<string> --multiple=<comma separated values> you can also document the option [current: yop,yep] --number=<int> boom [current: 2] --choice=<yo|ye> --multiple-choice=<yo|ye> --named=<key=val> Agroup: --diffgroup=<key=val> Bonus: a nice additional help""" if version_info < (2, 5): # 'usage' header is not capitalized in this version USAGE = USAGE.replace('Usage: ', 'usage: ') elif version_info < (2, 4): USAGE = """usage: Just do it ! (tm) options: -h, --help show this help message and exit --dothis=<y or n> -v<string>, --value=<string> --multiple=<comma separated values> you can also document the option --number=<int> --choice=<yo|ye> --multiple-choice=<yo|ye> --named=<key=val> Bonus: a nice additional help """ self.assertMultiLineEqual(help, USAGE) def test_manpage(self): from logilab.common import __pkginfo__ self.cfg.generate_manpage(__pkginfo__, stream=StringIO()) def test_rewrite_config(self): changes = [('renamed', 'renamed', 'choice'), ('moved', 'named', 'old', 'test'), ] read_old_config(self.cfg, changes, join(DATA, 'test.ini')) stream = StringIO() self.cfg.generate_config(stream) self.assertMultiLineEqual(stream.getvalue().strip(), """[TEST] dothis=yes value=' ' # you can also document the option multiple=yop # boom number=2 choice=yo multiple-choice=yo,ye named=key:val [AGROUP] diffgroup=pouet""") class Linter(OptionsManagerMixIn, OptionsProviderMixIn): options = ( ('profile', {'type' : 'yn', 'metavar' : '<y_or_n>', 'default': False, 'help' : 'Profiled execution.'}), ) def __init__(self): OptionsManagerMixIn.__init__(self, usage="") OptionsProviderMixIn.__init__(self) self.register_options_provider(self) self.load_provider_defaults() class RegrTC(TestCase): def setUp(self): self.linter = Linter() def test_load_defaults(self): self.linter.load_command_line_configuration([]) self.assertEqual(self.linter.config.profile, False) if __name__ == '__main__': unittest_main()
musicmetric/logilab-common
test/unittest_configuration.py
Python
gpl-2.0
10,551
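The test suite above exercises logilab.common.configuration.Configuration end to end; distilled from those tests, a standalone sketch looks like the following, with the option name, values and usage string invented for the example.

# Option names and values are invented; the API calls mirror the tests above.
from logilab.common.configuration import Configuration

options = [('value', {'type': 'string', 'short': 'v', 'metavar': '<string>',
                      'help': 'a demo option'})]
cfg = Configuration(name='demo', options=options, usage='demo [options]')

# Leftover positional arguments are returned; parsed options land in cfg.
leftover = cfg.load_command_line_configuration(['-v', 'hello', 'somefile'])
assert cfg['value'] == 'hello'
assert leftover == ['somefile']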
# https://www.hackerrank.com/challenges/restaurant import sys import math """ def gcd(a, b): if b>a: t = a a = b b = t while b!=0: t = b b = a % b a = t return a """ """ t = int(raw_input()) while t>0: l, b = [int(x) for x in raw_input().split()] print l * b / gcd(l, b)**2 t -= 1 """ """ t = int(raw_input()) while t>0: n = int(raw_input()) arr = [int(x) for x in raw_input().split()] print reduce(lambda x,y: x*y if x*y<1234567 else x*y-1234567, arr) t-=1 """ """ t = int(raw_input()) while t>0: n, k = [int(x) for x in raw_input().split()] res = (2*k + 1 if k<(n/2) else 2(n-k-1)) print res t -= 1 """ """ t = int(raw_input()) while t>0: n = int(raw_input()) if n%2 == 1: print 0 else: a = [x for x in range(2,n/2 + 1) if n%x== 0 and x%2 == 0] # print a print len(a) + 1 t -= 1 """ """ t = int(raw_input()) while t>0: n = long(raw_input()) print pow(n,2)%(pow(10,9)+7) t -= 1 """ """ def cust_fact(n,m): fact = 1 ret = {} for i in xrange(1,n+m): fact = (fact*i) % (pow(10,9) + 7) if i == n: ret["n"] = fact if i == m-1: ret["m1"] = fact if i == n+m-1: ret["nm1"] = fact return ret t = int(raw_input()) while t>0: n, m = [int(x) for x in raw_input().split()] if m == 1: print 1 elif n == 0: print 1 else: x = cust_fact(n, m) print x["nm1"]/(x["n"]*x["m1"]) % (pow(10,9) + 7) t -= 1 """ """ def nCrTable(n): x = 1 i = 0 print x, while i<n: # print "O:",x x = x*(n-i) / (i + 1) print x, i += 1 print nCrTable(2) nCrTable(3) nCrTable(4) nCrTable(5) nCrTable(6) """ """ t = int(raw_input()) while t>0: n = int(raw_input()) nCrTable(n) t -= 1 """ """ l, s1, s2 = [int(x) for x in raw_input().split()] q = int(raw_input()) rel_vel = math.fabs(s1 - s2) while q>0: qi = int(raw_input()) diag = math.sqrt(qi)*math.sqrt(2) print ( l*math.sqrt(2) - diag ) / rel_vel q -= 1 """ def bin_seq(n): x = 0 while n >= 0: n -= 1 yield x if x%2 == 0: x += 1 else: a = x i = 1 while a%10 != 1: i += 1 a /= 10 print "**", x, i, a if (i+1) == len(str(x)): x = pow(10, i+1) else: x += i*10 - 1 for x in bin_seq(10): print x
capsci/chrome
practice/python/resta.py
Python
mit
2,221
#! -*- coding: utf-8 -*-
from trans_l.decorators import transliterate_function


@transliterate_function(language_code="ru", reversed=False)
def trans_word(word):
    return word
WilfMan/vkontakte
trans.py
Python
mit
179
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
import json


class test(Document):
    def db_insert(self):
        d = self.get_valid_dict(convert_dates_to_str=True)
        with open("data_file.json", "w+") as read_file:
            json.dump(d, read_file)

    def load_from_db(self):
        with open("data_file.json", "r") as read_file:
            d = json.load(read_file)
        super(Document, self).__init__(d)

    def db_update(self):
        d = self.get_valid_dict(convert_dates_to_str=True)
        with open("data_file.json", "w+") as read_file:
            json.dump(d, read_file)

    def get_list(self, args):
        with open("data_file.json", "r") as read_file:
            return [json.load(read_file)]

    def get_value(self, fields, filters, **kwargs):
        # return []
        with open("data_file.json", "r") as read_file:
            return [json.load(read_file)]
saurabh6790/frappe
frappe/core/doctype/test/test.py
Python
mit
970
#!/usr/bin/python # -*- coding: utf-8 -*- """ HTTP/Web class. Holds commonly-used HTTP/web request/post methods. Compatible with Python 2.5, 2.6, 2.7 """ import time import urllib2, cookielib, urllib, httplib from sys import stderr DOWNLOAD_TIMEOUT = 10 class Httpy: """ Class used for communicating with web servers. """ DEFAULT_USERAGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:34.0) Gecko/20100101 Firefox/34.0' def __init__(self, user_agent=None, debugging=False): """ Sets this class's user agent. """ self.debugging = debugging self.cj = cookielib.CookieJar() self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj)) self.Request = urllib2.Request self.urlopen = self.opener.open if user_agent != None: self.user_agent = user_agent else: self.user_agent = Httpy.DEFAULT_USERAGENT def raise_timeout(self, signum, frame): raise Exception("Timeout") def get_meta(self, url): """ Reads file info (content type, length, etc) without downloading Times out after 10 seconds (5 to unshorten, 5 to get meta) """ url = self.unshorten(url) try: headers = {'User-agent' : self.user_agent} req = urllib2.Request(url, headers=headers) site = self.urlopen(req) #site = self.urlopen(url) except Exception: return {'content-type': 'unknown', 'content-length': '0'} return site.info() def unshorten(self, url): """ Unshortens URL. Follows until no more redirects. Times out after 5 seconds """ try: headers = {'User-agent' : self.user_agent} req = urllib2.Request(url, headers=headers) site = self.urlopen(req) except urllib2.HTTPError: return url except Exception: return url return site.url def check(self, url): """ Check if a URL is valid """ try: self.urlopen(url) except: return False return True def get(self, url, headers={}): """ Attempts GET request with web server. Returns html source of a webpage (string). Returns '' if unable to retrieve webpage for any reason. Will attempt to repeatedly post if '504' response error is received or 'getaddrinfo' fails. """ if not 'User-agent' in headers: headers['User-agent'] = self.user_agent try: req = urllib2.Request(url, headers=headers) handle = self.urlopen(req) except Exception, e: if self.debugging: stderr.write('Httpy: Exception while creating request: %s\n' % str(e)) raise e try: result = handle.read() except Exception, e: if self.debugging: stderr.write('Httpy: Exception while reading response: %s\n' % str(e)) raise e return result def getter(self, url, headers={}, retry=1): """ Attempts GET request with extended options. Returns html source of a webpage (string). Returns '' if unable to retrieve webpage for any reason. Will retry attempts that fail. Does *NOT* utilize cookie jar! 
""" if not 'User-agent' in headers: headers['User-agent'] = self.user_agent (https, host, path) = self.get_https_host_path(url) if self.debugging: stderr.write('Httpy.py: GET http%s://%s%s\n' % ('s' if https else '', host, path)) try: if https: req = httplib.HTTPSConnection(host) else: req = httplib.HTTPConnection(host) req.putrequest('GET', path) if self.debugging: stderr.write('Httpy.py: headers:\n') for hkey in headers.keys(): if self.debugging: stderr.write(' %s:\t%s\n' % (hkey, headers[hkey])) req.putheader(hkey, headers[hkey]) req.endheaders() resp = req.getresponse() if self.debugging: stderr.write('Httpy.py: response headers:') for h,v in resp.getheaders(): if self.debugging: stderr.write(' %s: "%s"\n' % (h, v)) if resp.status == 200: return resp.read() elif resp.status in [301, 302] and resp.getheader('Location') != None: if self.debugging: stderr.write('Httpy.py: Got %d to %s' % (resp.status, resp.getheader('Location'))) return self.getter(resp.getheader('Location'), headers=headers, retry=retry-1) else: result = '' try: result = resp.read() except: pass if self.debugging: stderr.write('Httpy.py: HTTP status %s: %s\n' % (resp.status, resp.reason)) return result except Exception, e: if self.debugging: stderr.write('Httpy.py: Exception: %s: %s\n' % (url, str(e))) if retry > 0: return self.getter(url, headers=headers, retry=retry-1) return '' def get_https_host_path(self, url): https = url.startswith('https') path = '' host = url[url.find('//')+2:] if '/' in host: host = host[:host.find('/')] path = url[url.find(host)+len(host):] return (https, host, path) def fix_string(self, s): r = '' for c in s: c2 = '' try: c2 = str(c) except UnicodeEncodeError: c2 = '' r += c2 return r def fix_dict(self, dict): d = {} for key in dict: value = dict[key] d[key] = self.fix_string(value) return d def oldpost(self, url, postdict=None, headers={}): """ Submits a POST request to URL. Posts 'postdict' if not None. URL-encodes postdata (if dict) and strips Unicode chars. """ result = '' if not 'User-agent' in headers: headers['User-agent'] = self.user_agent if postdict == None: encoded_data = '' elif type(postdict) == dict: encoded_data = urllib.urlencode(postdict) elif type(postdict) == str: encoded_data = postdict try: req = self.Request(url, encoded_data, headers) handle = self.urlopen(req) result = handle.read() except Exception, e: if self.debugging: stderr.write('Httpy.py: Exception: %s: %s\n' % (url, str(e))) return result def post(self, url, postdict=None, headers={}): """ Attempts POST request with web server. Returns response of a POST request to a web server. 'postdict' must be a dictionary of keys/values to post to the server. Returns '' if unable to post/retrieve response. Will attempt to repeatedly post if '504' response error is received or 'getaddrinfo' fails. 
""" if not 'User-agent' in headers: headers['User-agent'] = self.user_agent data = '' if postdict != None and type(postdict) == dict: fixed_dict = self.fix_dict(postdict) data = urllib.urlencode(fixed_dict) elif postdict != None and type(postdict) == str: data = postdict headers['Content-Length'] = len(data) host = url[url.find('//')+2:] host = host[:host.find('/')] if self.debugging: stderr.write('Httpy.py: host: "%s"\n' % host) path = url[url.find(host)+len(host):] if self.debugging: stderr.write('Httpy.py: path: "%s"\n' % path) if self.debugging: stderr.write('Httpy.py: headers: %s\n' % str(headers)) if self.debugging: stderr.write('Httpy.py: postdata: "%s"\n' % data) try: if url.startswith('https'): req = httplib.HTTPSConnection(host) else: req = httplib.HTTPConnection(host) req.putrequest('POST', path) for hkey in headers.keys(): req.putheader(hkey, headers[hkey]) req.endheaders() req.send(data) resp = req.getresponse() if resp.status == 200: return resp.read() else: if self.debugging: stderr.write('Httpy.py: HTTP status %s: %s: %s\n' % (resp.status, resp.reason, resp.read())) if self.debugging: stderr.write('Httpy.py: Response headers:\n') for name, value in resp.getheaders(): if self.debugging: stderr.write('Httpy.py: \t"%s"="%s"\n' % (name, value)) return '' except Exception, e: if self.debugging: stderr.write('Httpy.py: Exception: %s: %s\n' % (url, str(e))) return '' def download(self, url, save_as, headers={}, timeout=DOWNLOAD_TIMEOUT, raise_exception=True, retries=3): """ Downloads file from URL to save_as path. """ retry_count = 0 if not 'User-agent' in headers: headers['User-agent'] = self.user_agent outfile = open(save_as, 'wb') while True: try: retry_count += 1 req = urllib2.Request(url, headers=headers) handle = self.urlopen(req, timeout=timeout) while True: buf = handle.read(65536) if len(buf) == 0: break outfile.write(buf) except Exception, e: if self.debugging: stderr.write('Httpy.py: download(%s): %s\n' % (url, str(e))) if retry_count <= retries: if self.debugging: stderr.write('Httpy.py: download(%s): Retrying (%d remain)\n' % (url, retries - retry_count)) continue if raise_exception: raise e break outfile.close() def clear_cookies(self): """ Clears cookies in cookie jar. """ self.cj.clear() def set_user_agent(user_agent): """ Changes the user-agent used when connecting. """ self.user_agent = user_agent def between(self, source, start, finish): """ Helper method. Useful when parsing responses from web servers. Looks through a given source string for all items between two other strings, returns the list of items (or empty list if none are found). Example: test = 'hello >30< test >20< asdf >>10<< sadf>' print between(test, '>', '<') would print the list: ['30', '20', '>10'] """ result = [] i = source.find(start) j = source.find(finish, i + len(start)) while i >= 0 and j >= 0: i = i + len(start) result.append(source[i:j]) i = source.find(start, j + len(finish)) j = source.find(finish, i + len(start)) return result
4pr0n/gonewilder
py/Httpy.py
Python
gpl-2.0
9,304
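The Httpy class above carries docstrings for each call (get, post, download, between); a short usage sketch follows, assuming the class can be imported as Httpy from the py/Httpy.py module and using placeholder URLs and paths. It targets the same Python 2 environment the class was written for.

# Assumed import path and placeholder URLs; grounded in the methods defined above.
from Httpy import Httpy

web = Httpy(debugging=True)

# GET a page and pull out everything between two delimiters.
source = web.get('http://www.example.com/')
titles = web.between(source, '<title>', '</title>')

# POST a url-encoded form dictionary.
response = web.post('http://www.example.com/login',
                    postdict={'user': 'name', 'pass': 'secret'})

# Stream a file to disk; retries up to 3 times by default.
web.download('http://www.example.com/image.jpg', 'image.jpg')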
from django.db import models from django.contrib.auth.models import User from django.http import Http404 from ckeditor_uploader.fields import RichTextUploadingField from wquests import common_functions class WebQuest(models.Model): creator_ref = models.ForeignKey(User, on_delete=models.DO_NOTHING, verbose_name="Creator") parent_quest_ref = models.ForeignKey('WebQuest', on_delete=models.DO_NOTHING, verbose_name="Parent quest", blank=True, default=None, null=True) def __str__(self): return str(self.pk) def get_absolute_url(self): return "/IMPLEMENT_FUNCTION_ABSOLUTE_URL" def fill_last_version(self, request): my = common_functions.get_value_or_none(request.GET, 'my') if (not my) or (not request.user.is_authenticated) or self.creator_ref != request.user: self.web_quest_version = WebQuestVersion.objects.filter(web_quest_ref = self.pk).filter(published=True).order_by('-version')[0] else: self.web_quest_version = WebQuestVersion.objects.filter(web_quest_ref = self.pk).order_by('-version')[0] def get_last_changed_webquests(request): my = common_functions.get_value_or_none(request.GET, 'my') theme = common_functions.get_value_or_none(request.GET, 'theme') if not my: if theme is not None and theme != "" and theme != 0: web_quest_versions = WebQuestVersion.objects.filter(theme=int(theme)).filter(published = True).order_by('-version').order_by('-createdon').values_list('web_quest_ref', flat=True).distinct() else: web_quest_versions = WebQuestVersion.objects.filter(published = True).order_by('-version').order_by('-createdon').values_list('web_quest_ref', flat=True).distinct() return WebQuest.objects.filter(pk__in=[web_quest_version for web_quest_version in web_quest_versions]) else: if not request.user.is_authenticated: raise Http404("You are not authorized!") if theme is not None and theme != "" and theme != 0: web_quest_versions = WebQuestVersion.objects.filter(theme=int(theme)).order_by('-version').order_by('-createdon').values_list('web_quest_ref', flat=True).distinct() else: web_quest_versions = WebQuestVersion.objects.order_by('-version').order_by('-createdon').values_list('web_quest_ref', flat=True).distinct() return WebQuest.objects.filter(creator_ref=request.user).filter(pk__in=[web_quest_version for web_quest_version in web_quest_versions]) class Meta: verbose_name = 'Web-quest' verbose_name_plural = 'Web-quests' class WebQuestVersion(models.Model): web_quest_ref = models.ForeignKey('WebQuest', on_delete=models.CASCADE, verbose_name='Web-quest') language = models.CharField(max_length=4, blank=True, default=None, verbose_name="Language") version = models.FloatField(default=0.0, verbose_name="Version") createdon = models.DateField(verbose_name="Date of creation", auto_now_add=True) name = models.CharField(max_length=100, verbose_name="Name") published = models.BooleanField(default=False, verbose_name="Published") theme = models.IntegerField(default=0, verbose_name="Theme") def __str__(self): wq = WebQuest.objects.get(pk=self.web_quest_ref.pk) return str(wq) + ' : ' + self.name + ' ' + self.language + ' ' + str(self.version) def get_absolute_url(self): return "/IMPLEMENT_FUNCTION_ABSOLUTE_URL" class Meta: verbose_name = 'Web-quest version' verbose_name_plural = 'Web-quest versions' class WebQuestVersionSection(models.Model): web_quest_version_ref = models.ForeignKey('WebQuestVersion', on_delete=models.CASCADE, verbose_name='Web-quest version') order = models.IntegerField(verbose_name='Order number', blank=True, default=0) title = models.CharField(max_length=50, blank=True, default=None, 
verbose_name='Title') rtcontent = RichTextUploadingField(default=None, null=True, blank=True, verbose_name="Content of section") def __str__(self): wqv = WebQuestVersion.objects.get(pk=self.web_quest_version_ref.pk) return str(wqv) + ' : ' + self.title def get_absolute_url(self): return "/IMPLEMENT_FUNCTION_ABSOLUTE_URL" class Meta: verbose_name = 'Web-quest version section' verbose_name_plural = 'Web-quest version sections' # TODO: Add tags and themes
priakni/wquests
wquests/wqengine/models.py
Python
mit
4,476
__license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <[email protected]>' __docformat__ = 'restructuredtext en' from calibre.devices.usbms.driver import USBMS class TECLAST_K3(USBMS): name = 'Teclast K3/K5 Device Interface' gui_name = 'K3/K5' description = _('Communicate with the Teclast K3/K5 reader.') author = 'Kovid Goyal' supported_platforms = ['windows', 'osx', 'linux'] # Ordered list of supported formats FORMATS = ['epub', 'fb2', 'doc', 'pdf', 'txt'] VENDOR_ID = [0x071b] PRODUCT_ID = [0x3203] BCD = [0x0000, 0x0100] VENDOR_NAME = ['TECLAST', 'IMAGIN', 'RK28XX', 'PER3274B'] WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['DIGITAL_PLAYER', 'TL-K5', 'EREADER', 'USB-MSC', 'PER3274B'] MAIN_MEMORY_VOLUME_LABEL = 'K3 Main Memory' STORAGE_CARD_VOLUME_LABEL = 'K3 Storage Card' EBOOK_DIR_MAIN = '' EBOOK_DIR_CARD_A = '' SUPPORTS_SUB_DIRS = True class NEWSMY(TECLAST_K3): name = 'Newsmy device interface' gui_name = 'Newsmy' description = _('Communicate with the Newsmy reader.') FORMATS = ['epub', 'fb2', 'pdb', 'html', 'pdf', 'txt', 'skt'] VENDOR_NAME = '' WINDOWS_MAIN_MEM = 'NEWSMY' WINDOWS_CARD_A_MEM = 'USBDISK____SD' class ARCHOS7O(TECLAST_K3): name = 'Archos 7O device interface' gui_name = 'Archos' description = _('Communicate with the Archos reader.') FORMATS = ['epub', 'mobi', 'fb2', 'rtf', 'ap', 'html', 'pdf', 'txt'] VENDOR_NAME = 'ARCHOS' WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'USB-MSC' class PICO(NEWSMY): name = 'Pico device interface' gui_name = 'Pico' description = _('Communicate with the Pico reader.') VENDOR_NAME = ['TECLAST', 'IMAGIN', 'LASER-', ''] WINDOWS_MAIN_MEM = ['USBDISK__USER', 'EB720'] EBOOK_DIR_MAIN = 'Books' FORMATS = ['EPUB', 'FB2', 'TXT', 'LRC', 'PDB', 'PDF', 'HTML', 'WTXT'] SCAN_FROM_ROOT = True class IPAPYRUS(TECLAST_K3): name = 'iPapyrus device interface' gui_name = 'iPapyrus' description = _('Communicate with the iPapyrus reader.') FORMATS = ['epub', 'pdf', 'txt'] VENDOR_NAME = ['E_READER', 'EBOOKREA'] WINDOWS_MAIN_MEM = '' class SOVOS(TECLAST_K3): name = 'Sovos device interface' gui_name = 'Sovos' description = _('Communicate with the Sovos reader.') FORMATS = ['epub', 'fb2', 'pdf', 'txt'] VENDOR_NAME = 'RK28XX' WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'USB-MSC' class SUNSTECH_EB700(TECLAST_K3): name = 'Sunstech EB700 device interface' gui_name = 'EB700' description = _('Communicate with the Sunstech EB700 reader.') FORMATS = ['epub', 'fb2', 'pdf', 'pdb', 'txt'] VENDOR_NAME = 'SUNEB700' WINDOWS_MAIN_MEM = 'USB-MSC' class STASH(TECLAST_K3): name = 'Stash device interface' gui_name = 'Stash' description = _('Communicate with the Stash W950 reader.') FORMATS = ['epub', 'fb2', 'lrc', 'pdb', 'html', 'fb2', 'wtxt', 'txt', 'pdf'] VENDOR_NAME = 'STASH' WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'W950' class WEXLER(TECLAST_K3): name = 'Wexler device interface' gui_name = 'Wexler' description = _('Communicate with the Wexler reader.') FORMATS = ['epub', 'fb2', 'pdf', 'txt'] VENDOR_NAME = 'WEXLER' WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'T7001'
yeyanchao/calibre
src/calibre/devices/teclast/driver.py
Python
gpl-3.0
3,441
# Copyright 2013-2014 Sebastian Kreft # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import json import os import sys import unittest import mock import gitlint import gitlint.linters as linters # pylint: disable=too-many-public-methods class GitLintTest(unittest.TestCase): @classmethod def setUpClass(cls): cls._stderr = sys.stderr sys.stderr = sys.stdout cls.git_lint_config = gitlint.get_config(None) @classmethod def tearDownClass(cls): sys.stderr = cls._stderr def setUp(self): self.root = '/home/user/repo' self.filename = os.path.join(self.root, 'changed.py') self.filename2 = os.path.join(self.root, 'foo.txt') self.stdout = io.StringIO() self.stderr = io.StringIO() self.git_repository_root_patch = mock.patch( 'gitlint.git.repository_root', return_value=self.root) self.git_repository_root = self.git_repository_root_patch.start() self.addCleanup(self.git_repository_root_patch.stop) self.hg_repository_root_patch = mock.patch( 'gitlint.hg.repository_root', return_value=None) self.hg_repository_root = self.hg_repository_root_patch.start() self.addCleanup(self.hg_repository_root_patch.stop) self.git_modified_files_patch = mock.patch( 'gitlint.git.modified_files', return_value={self.filename: ' M'}) self.git_modified_files = self.git_modified_files_patch.start() self.addCleanup(self.git_modified_files_patch.stop) self.git_modified_lines_patch = mock.patch( 'gitlint.git.modified_lines', return_value=[3, 14]) self.git_modified_lines = self.git_modified_lines_patch.start() self.addCleanup(self.git_modified_lines_patch.stop) self.git_last_commit_patch = mock.patch( 'gitlint.git.last_commit', return_value="abcd" * 10) self.git_last_commit = self.git_last_commit_patch.start() self.addCleanup(self.git_last_commit_patch.stop) self.lint_patch = mock.patch( 'gitlint.linters.lint') self.lint = self.lint_patch.start() self.addCleanup(self.lint_patch.stop) def reset_mock_calls(self): """Resets the counter calls of the defined mocks.""" self.git_repository_root.reset_mock() self.git_modified_files.reset_mock() self.git_modified_lines.reset_mock() self.lint.reset_mock() def assert_mocked_calls(self, tracked_only=False, commit=None): """Checks if the mocks were called as expected. This method exists to avoid duplication. 
""" self.git_modified_files.assert_called_once_with( self.root, tracked_only=tracked_only, commit=commit) self.git_modified_lines.assert_called_once_with( self.filename, ' M', commit=commit) self.lint.assert_called_once_with( self.filename, [3, 14], self.git_lint_config) def test_find_invalid_filenames(self): filenames = ['/tmp/outside_repo', '%s/inexistent_file' % self.root, '%s/directory_in_repo/' % self.root, '%s/valid' % self.root] expected = { '/tmp/outside_repo': 'does not belong to repository', '%s/inexistent_file' % self.root: 'does not exist', '%s/directory_in_repo/' % self.root: ('Directories are not yet ' + 'supported'), } with mock.patch( 'os.path.exists', side_effect=lambda filename: 'inexistent' not in filename), \ mock.patch( 'os.path.isdir', side_effect=lambda filename: 'directory' in filename): invalid_filenames = dict( gitlint.find_invalid_filenames(filenames, self.root)) self.assertEqual(expected.keys(), invalid_filenames.keys()) for filename in invalid_filenames: self.assertIn(filename, invalid_filenames[filename]) self.assertIn(expected[filename], invalid_filenames[filename]) def test_main_not_in_repo(self): self.git_repository_root.return_value = None self.assertEqual( 128, gitlint.main([], stdout=None, stderr=self.stderr)) self.assertIn('Not a git repository', self.stderr.getvalue()) def test_main_nothing_changed(self): self.git_modified_files.return_value = {} self.assertEqual( 0, gitlint.main([], stdout=None, stderr=None)) self.git_modified_files.assert_called_once_with( self.root, tracked_only=False, commit=None) def test_main_file_changed_and_still_valid(self): lint_response = { self.filename: { 'comments': [] } } self.lint.return_value = lint_response self.assertEqual( 0, gitlint.main([], stdout=self.stdout, stderr=None)) self.assertIn('OK', self.stdout.getvalue()) self.assert_mocked_calls() def test_main_file_changed_and_still_valid_with_commit(self): lint_response = { self.filename: { 'comments': [] } } self.lint.return_value = lint_response self.assertEqual( 0, gitlint.main( ['git-lint', '--last-commit'], stdout=self.stdout, stderr=None)) self.assertIn('OK', self.stdout.getvalue()) self.assert_mocked_calls(commit='abcd' * 10) def test_main_file_changed_and_still_valid_tracked_only(self): lint_response = { self.filename: { 'comments': [] } } self.lint.return_value = lint_response self.assertEqual( 0, gitlint.main(['git-lint', '-t'], stdout=self.stdout, stderr=None)) self.assertIn('OK', self.stdout.getvalue()) self.assert_mocked_calls(tracked_only=True) self.reset_mock_calls() self.stdout = io.StringIO() self.stderr = io.StringIO() self.assertEqual(0, gitlint.main(['git-lint', '--tracked'], stdout=self.stdout, stderr=None)) self.assertIn('OK', self.stdout.getvalue()) self.assert_mocked_calls(tracked_only=True) def test_main_file_changed_but_skipped(self): lint_response = { self.filename: { 'skipped': ['foo'] } } self.lint.return_value = lint_response self.assertEqual(0, gitlint.main([], stdout=self.stdout, stderr=None)) self.assertIn('SKIPPED', self.stdout.getvalue()) self.assert_mocked_calls() def test_main_file_linter_not_found(self): lint_response = { self.filename: { 'error': ['foo'] } } self.lint.return_value = lint_response self.assertEqual(4, gitlint.main([], stdout=self.stdout, stderr=None)) self.assertIn('ERROR', self.stdout.getvalue()) self.assert_mocked_calls() def test_main_file_changed_and_now_invalid(self): lint_response = { self.filename: { 'comments': [ { 'line': 3, 'message': 'error' } ] } } self.lint.return_value = lint_response 
self.assertEqual(1, gitlint.main([], stdout=self.stdout, stderr=None)) self.assertIn('line 3: error', self.stdout.getvalue()) self.assert_mocked_calls() def test_main_file_with_skipped_error_and_comments(self): lint_response = { self.filename: { 'skipped': ['skipped1', 'skipped2'], 'error': ['error1', 'error2'], 'comments': [ { 'line': 3, 'message': 'message1' }, { 'line': 4, 'message': 'message2' } ] } } self.lint.return_value = lint_response self.assertEqual(1, gitlint.main([], stdout=self.stdout, stderr=None)) self.assertIn('line 3: message1', self.stdout.getvalue()) self.assertIn('line 4: message2', self.stdout.getvalue()) self.assertIn('skipped1', self.stdout.getvalue()) self.assertIn('skipped2', self.stdout.getvalue()) self.assertIn('error1', self.stdout.getvalue()) self.assertIn('error2', self.stdout.getvalue()) self.assert_mocked_calls() def test_main_file_json(self): self.git_modified_files.return_value = { self.filename: ' M', self.filename2: 'M ', } lint_responses = [ { self.filename: { 'skipped': ['skipped1', 'skipped2'], 'error': ['error1', 'error2'], 'comments': [ { 'line': 3, 'message': 'message1' }, { 'line': 4, 'message': 'message2' } ] } }, { self.filename2: { 'comments': [] }, } ] self.lint.side_effect = lint_responses expected_response = { self.filename: { 'skipped': ['skipped1', 'skipped2'], 'error': ['error1', 'error2'], 'comments': [ { 'line': 3, 'message': 'message1', 'formatted_message': 'line 3: message1' }, { 'line': 4, 'message': 'message2', 'formatted_message': 'line 4: message2' } ] }, self.filename2: { 'comments': [] }, } self.assertEqual( 1, gitlint.main(['git-lint', '--json'], stdout=self.stdout, stderr=None)) self.assertEqual(expected_response, json.loads(self.stdout.getvalue())) def test_main_file_with_skipped_and_error(self): lint_response = { self.filename: { 'skipped': ['skipped1'], 'error': ['error1'], 'comments': [] } } self.lint.return_value = lint_response self.assertEqual(4, gitlint.main([], stdout=self.stdout, stderr=None)) self.assertNotIn('OK', self.stdout.getvalue()) self.assertIn('skipped1', self.stdout.getvalue()) self.assertIn('error1', self.stdout.getvalue()) self.assert_mocked_calls() def test_main_force_all_lines(self): lint_response = { self.filename: { 'comments': [ { 'line': 3, 'message': 'error' } ] } } self.lint.return_value = lint_response self.git_modified_lines.return_value = [] self.assertEqual(1, gitlint.main(['git-lint', '--force'], stdout=self.stdout, stderr=None)) self.assertIn('line 3: error', self.stdout.getvalue()) self.git_modified_files.assert_called_once_with( self.root, tracked_only=False, commit=None) self.lint.assert_called_once_with( self.filename, None, self.git_lint_config) self.reset_mock_calls() self.stdout = io.StringIO() self.assertEqual(1, gitlint.main(['git-lint', '-f'], stdout=self.stdout, stderr=None)) self.assertIn('line 3: error', self.stdout.getvalue()) self.git_modified_files.assert_called_once_with( self.root, tracked_only=False, commit=None) self.lint.assert_called_once_with( self.filename, None, self.git_lint_config) def test_main_with_invalid_files(self): with mock.patch('gitlint.find_invalid_filenames', return_value=[('foo.txt', 'does not exist')]): self.assertEqual(2, gitlint.main(['git-lint', 'foo.txt'], stdout=None, stderr=self.stderr)) self.assertIn('does not exist', self.stderr.getvalue()) def test_main_with_valid_files(self): lint_response = { self.filename: { 'comments': [] }, self.filename2: { 'comments': [] }, } self.lint.return_value = lint_response with 
mock.patch('gitlint.find_invalid_filenames', return_value=[]), \ mock.patch('os.getcwd', return_value=self.root): self.assertEqual( 0, gitlint.main(['git-lint', self.filename, self.filename2], stdout=self.stdout, stderr=None)) self.assertIn('OK', self.stdout.getvalue()) self.assertIn(os.path.basename(self.filename), self.stdout.getvalue()) self.assertIn(os.path.basename(self.filename2), self.stdout.getvalue()) self.git_modified_files.assert_called_once_with( self.root, tracked_only=False, commit=None) expected_calls = [ mock.call(self.filename, ' M', commit=None), mock.call(self.filename2, None, commit=None), ] self.assertEqual(expected_calls, self.git_modified_lines.call_args_list) expected_calls = [ mock.call(self.filename, [3, 14], self.git_lint_config), mock.call(self.filename2, [3, 14], self.git_lint_config)] self.assertEqual(expected_calls, self.lint.call_args_list) def test_main_with_valid_files_relative(self): lint_response = { self.filename: { 'comments': [] }, self.filename2: { 'comments': [] }, } self.lint.return_value = lint_response with mock.patch('gitlint.find_invalid_filenames', return_value=[]), \ mock.patch('os.getcwd', return_value=self.root): self.assertEqual( 0, gitlint.main(['git-lint', 'bar/../changed.py', './foo.txt'], stdout=self.stdout, stderr=self.stderr)) self.assertIn('OK', self.stdout.getvalue()) self.assertEqual('', self.stderr.getvalue()) self.git_modified_files.assert_called_once_with( self.root, tracked_only=False, commit=None) expected_calls = [mock.call(self.filename, ' M', commit=None), mock.call(self.filename2, None, commit=None)] self.assertEqual(expected_calls, self.git_modified_lines.call_args_list) expected_calls = [ mock.call(self.filename, [3, 14], self.git_lint_config), mock.call(self.filename2, [3, 14], self.git_lint_config)] self.assertEqual(expected_calls, self.lint.call_args_list) def test_get_config(self): git_config = os.path.join(self.root, '.gitlint.yaml') config = """python: extensions: - .py command: python arguments: - "-R" - "-v" filter: ".*" installation: "Really?" """ with mock.patch('os.path.exists', return_value=True), \ mock.patch('gitlint.open', mock.mock_open(read_data=config), create=True) as mock_open: parsed_config = gitlint.get_config(self.root) mock_open.assert_called_once_with(git_config) self.assertEqual(['.py'], list(parsed_config.keys())) self.assertEqual(1, len(parsed_config['.py'])) def test_get_config_from_default(self): with mock.patch('os.path.exists', return_value=False): parsed_config = gitlint.get_config(self.root) self.assertEquals(self.git_lint_config, parsed_config) def test_get_config_not_in_a_repo(self): # When not in a repo should return the default config. self.git_repository_root.return_value = None parsed_config = gitlint.get_config(None) self.assertEquals(self.git_lint_config, parsed_config) def test_get_config_empty(self): # When config file is empty return an empty dictionary. 
with mock.patch('os.path.exists', return_value=True), \ mock.patch('gitlint.open', mock.mock_open(read_data=''), create=True) as mock_open: parsed_config = gitlint.get_config(self.root) self.assertEqual({}, parsed_config) def test_format_comment(self): self.assertEqual('', gitlint.format_comment({})) self.assertEqual('line 1: message', gitlint.format_comment({ 'line': 1, 'message': 'message', })) self.assertEqual('line 1, col 2: message', gitlint.format_comment({ 'line': 1, 'column': 2, 'message': 'message', })) self.assertEqual('line 1, col 2: Error: message', gitlint.format_comment({ 'line': 1, 'column': 2, 'severity': 'Error', 'message': 'message', })) self.assertEqual('line 1, col 2: Error: [not-used]: message', gitlint.format_comment({ 'line': 1, 'column': 2, 'severity': 'Error', 'message_id': 'not-used', 'message': 'message', })) self.assertEqual('col 2: [not-used]: message', gitlint.format_comment({ 'column': 2, 'message_id': 'not-used', 'message': 'message', })) self.assertEqual('line 1, col 2: Error: [not-used]: ', gitlint.format_comment({ 'line': 1, 'column': 2, 'severity': 'Error', 'message_id': 'not-used', })) def test_get_vcs_git(self): self.git_repository_root.return_value = self.root self.assertEquals((gitlint.git, self.root), gitlint.get_vcs_root()) def test_get_vcs_hg(self): self.git_repository_root.return_value = None self.hg_repository_root.return_value = self.root self.assertEquals((gitlint.hg, self.root), gitlint.get_vcs_root()) def test_get_vcs_none(self): self.git_repository_root.return_value = None self.hg_repository_root.return_value = None self.assertEquals((None, None), gitlint.get_vcs_root())
TimeIncOSS/git-lint
test/unittest/test_gitlint.py
Python
apache-2.0
20,634
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""cgsnapshot interface (v2 extension)."""

import six
try:
    from urllib import urlencode
except ImportError:
    from urllib.parse import urlencode

from cinderclient import base


class Cgsnapshot(base.Resource):
    """A cgsnapshot is a snapshot of a consistency group."""

    def __repr__(self):
        return "<cgsnapshot: %s>" % self.id

    def delete(self):
        """Delete this cgsnapshot."""
        self.manager.delete(self)

    def update(self, **kwargs):
        """Update the name or description for this cgsnapshot."""
        self.manager.update(self, **kwargs)


class CgsnapshotManager(base.ManagerWithFind):
    """Manage :class:`Cgsnapshot` resources."""
    resource_class = Cgsnapshot

    def create(self, consistencygroup_id, name=None, description=None,
               user_id=None, project_id=None):
        """Creates a cgsnapshot.

        :param consistencygroup_id: Name or uuid of a consistencygroup
        :param name: Name of the cgsnapshot
        :param description: Description of the cgsnapshot
        :param user_id: User id derived from context
        :param project_id: Project id derived from context
        :rtype: :class:`Cgsnapshot`
        """
        body = {'cgsnapshot': {'consistencygroup_id': consistencygroup_id,
                               'name': name,
                               'description': description,
                               'user_id': user_id,
                               'project_id': project_id,
                               'status': "creating",
                               }}

        return self._create('/cgsnapshots', body, 'cgsnapshot')

    def get(self, cgsnapshot_id):
        """Get a cgsnapshot.

        :param cgsnapshot_id: The ID of the cgsnapshot to get.
        :rtype: :class:`Cgsnapshot`
        """
        return self._get("/cgsnapshots/%s" % cgsnapshot_id, "cgsnapshot")

    def list(self, detailed=True, search_opts=None):
        """Lists all cgsnapshots.

        :rtype: list of :class:`Cgsnapshot`
        """
        if search_opts is None:
            search_opts = {}

        qparams = {}

        for opt, val in six.iteritems(search_opts):
            if val:
                qparams[opt] = val

        query_string = "?%s" % urlencode(qparams) if qparams else ""

        detail = ""
        if detailed:
            detail = "/detail"

        return self._list("/cgsnapshots%s%s" % (detail, query_string),
                          "cgsnapshots")

    def delete(self, cgsnapshot):
        """Delete a cgsnapshot.

        :param cgsnapshot: The :class:`Cgsnapshot` to delete.
        """
        self._delete("/cgsnapshots/%s" % base.getid(cgsnapshot))

    def update(self, cgsnapshot, **kwargs):
        """Update the name or description for a cgsnapshot.

        :param cgsnapshot: The :class:`Cgsnapshot` to update.
        """
        if not kwargs:
            return

        body = {"cgsnapshot": kwargs}

        self._update("/cgsnapshots/%s" % base.getid(cgsnapshot), body)

    def _action(self, action, cgsnapshot, info=None, **kwargs):
        """Perform a cgsnapshot "action."
        """
        body = {action: info}
        self.run_hooks('modify_body_for_action', body, **kwargs)
        url = '/cgsnapshots/%s/action' % base.getid(cgsnapshot)
        return self.api.client.post(url, body=body)
hybrid-storage-dev/cinder-client-fs-111t-hybrid-cherry
v2/cgsnapshots.py
Python
apache-2.0
4,004
#
# -*- coding: utf-8 -*-
#
# from django.db import models
# from protoLib.models import *
#
#
# class WflowAdminResume(ProtoModelExt):
#     """ Contains the latest summary of items that require administrator action.
#     When creating a WFlow record you can create an instance of this table or increment the counter.
#     You will also have to go to the shared wFlow tables (parameter = wFlowEntities)
#     and indicate which states to verify (parameterTag = 0)
#     """
#
#     viewEntity = models.CharField(max_length=250 , blank=False, null=False)
#     activityCount = models.IntegerField(blank=False, null=False)
#
#     def __str__(self):
#         return self.viewEntity + '.' + self.smOwningTeam.__str__()
#
#     protoExt = {
#         "actions": [
#             { "name": "doWFlowResume",
#               "selectionMode" : "none",
#               "refreshOnComplete" : True
#             },
#         ]
#     }
#
#
# class WflowUserReponse(ProtoModelExt):
#     """ Contains the results of administrator actions
#     """
#
#     viewEntity = models.CharField(max_length=250 , blank=False, null=False)
#     wfAction = models.CharField(max_length=250 , blank=False, null=False)
#     strKey = models.CharField(max_length=250 , blank=False, null=False)
#     adminMsg = models.CharField(max_length=250 , blank=False, null=False)
#
#     def __str__(self):
#         return self.viewEntity
#
#
# class UserFiles(ProtoModelExt):
#
#     docfile = models.FileField(upload_to='media/%Y/%m/%d')
#     description = models.TextField(verbose_name=u'Description', blank=True, null=True)
#
#
#
# class DiscreteValue(models.Model):
#     # TODO: handling of discrete values
#     # It is now done as a tree, for example to handle language codes such as fr.ca and es.ca
#     # Start with the initial filter discreteValue = None
#
#     code = models.CharField(blank=False, null=False, max_length=200)
#     value = models.CharField(blank=False, null=False, max_length=200)
#
#     description = models.TextField(blank=True, null=True)
#     title = models.ForeignKey('DiscreteValue', blank=True, null=True)
#
#     def __str__(self):
#         if self.title is None:
#             return self.code
#         else:
#             return self.title.code + '.' + self.code
#
#     class Meta:
#         unique_together = ('title', 'value',)
#
#     protoExt = {
#         "gridConfig" : {
#             "listDisplay": ["__str__", "description" ]
#         }
#     }
#
#
#
# class PtFunction(models.Model):
#     """
#     This table stores functions that will be executed dynamically.
#     They must respect Python syntax and will be preloaded with base functions,
#     for example the user profile and access to models.
#
#     They must always return something.
#     """
#
#     # name of the function
#     code = models.CharField(blank=False, null=False, max_length=200 , unique=True)
#
#     # this model is imported and made available to the function
#     modelName = models.CharField(blank=False, null=False, max_length=200)
#
#     # comma-separated list of argument names
#     arguments = models.CharField(blank=False, null=False, max_length=400)
#
#     functionBody = models.TextField(blank=True, null=True)
#
#     tag = models.CharField(blank=False, null=False, max_length=200)
#     description = models.TextField(verbose_name=u'Descriptions', blank=True, null=True)
#
#
#     def __str__(self):
#         return self.code + '.' + self.tag
#
#
#
DarioGT/docker-carra
src/protovarios/models.py
Python
mit
3,499
import asyncore
import socket
import json

class AsynService(asyncore.dispatcher):

    def __init__(self, host, port, engine):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.bind((host, port))
        self.listen(1)
        self.running = False
        self.connections = []
        self.processor = engine.processor

    def start(self):
        self.running = True
        while self.running:
            asyncore.loop(timeout=0.1)

    def stop(self):
        self.running = False

    def handle_accept(self):
        socket, address = self.accept()
        print("Connection from: ", address)
        conn = Handler(socket, address, self)
        self.connections.append(conn)

    def broadcast(self, message):
        print("Broadcasting new player: ", len(self.connections))
        for conn in self.connections:
            conn.write(message)

    def shutdown(self):
        self.close()
        for conn in self.connections:
            conn.write("Shutting down")
            conn.shutdown()

class Handler(asyncore.dispatcher):

    def __init__(self, socket, address, server):
        asyncore.dispatcher.__init__(self, socket)
        self.player = None
        self.address = address
        self.server = server
        self.obuffer = []
        self.user = None

    def write(self, data):
        try:
            if isinstance(data, str):
                self.obuffer.append(bytes(data, 'utf8'))
            else:
                self.obuffer.append(bytes(json.dumps(data), 'utf8'))
        except BaseException as e:
            print("Error appending output buffer: ", e)

    def shutdown(self):
        self.obuffer.append(None)

    def handle_read(self):
        # Should accumulate all data until a full command has been received.
        data = self.recv(4096)
        if not data:
            print("No data given, closing connection")
            self.close()
        else:
            decoded = data.decode("utf-8")
            data_json = None
            try:
                data_json = json.loads(decoded)
            except BaseException as e:
                self.write({ "error": str(e) })
                return

            try:
                response = self.server.processor.handle(self, data_json)
                if response is not None:
                    self.write(response)
            except BaseException as e:
                self.write({ "error": str(e) })

    def handle_close(self):
        self.server.connections.remove(self)

    def writable(self):
        return len(self.obuffer) > 0

    def handle_write(self):
        if self.obuffer[0] is None:
            self.close()
            return
        sent = self.send(self.obuffer[0])
        if sent >= len(self.obuffer[0]):
            self.obuffer.pop(0)
        else:
            self.obuffer[0] = self.obuffer[0][sent:]
astrellon/maze
server/python/cmds/command_asyn_service.py
Python
mit
3,001
# -*- coding: utf-8 -*- def command(): return "stop-all-instance" def init_argument(parser): parser.add_argument("--farm-no", required=True) def execute(requester, args): farm_no = args.farm_no parameters = {} parameters["FarmNo"] = farm_no return requester.execute("/StopAllInstance", parameters)
primecloud-controller-org/pcc-cli
src/pcc/api/instance/stop_all_instance.py
Python
apache-2.0
327
# Copyright (c) 2014 Adafruit Industries # Author: Tony DiCola # Based on Adafruit_I2C.py created by Kevin Townsend. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import logging import os import subprocess import Adafruit_GPIO.Platform as Platform def reverseByteOrder(data): """DEPRECATED: See https://github.com/adafruit/Adafruit_Python_GPIO/issues/48""" # # Courtesy Vishal Sapre # byteCount = len(hex(data)[2:].replace('L','')[::2]) # val = 0 # for i in range(byteCount): # val = (val << 8) | (data & 0xff) # data >>= 8 # return val raise RuntimeError('reverseByteOrder is deprecated! See: https://github.com/adafruit/Adafruit_Python_GPIO/issues/48') def get_default_bus(): """Return the default bus number based on the device platform. For a Raspberry Pi either bus 0 or 1 (based on the Pi revision) will be returned. For a Beaglebone Black the first user accessible bus, 1, will be returned. """ plat = Platform.platform_detect() if plat == Platform.RASPBERRY_PI: if Platform.pi_revision() == 1: # Revision 1 Pi uses I2C bus 0. return 0 else: # Revision 2 Pi uses I2C bus 1. return 1 elif plat == Platform.BEAGLEBONE_BLACK: # Beaglebone Black has multiple I2C buses, default to 1 (P9_19 and P9_20). return 1 else: raise RuntimeError('Could not determine default I2C bus for platform.') def get_i2c_device(address, busnum=None, i2c_interface=None, **kwargs): """Return an I2C device for the specified address and on the specified bus. If busnum isn't specified, the default I2C bus for the platform will attempt to be detected. """ if busnum is None: busnum = get_default_bus() return Device(address, busnum, i2c_interface, **kwargs) def require_repeated_start(): """Enable repeated start conditions for I2C register reads. This is the normal behavior for I2C, however on some platforms like the Raspberry Pi there are bugs which disable repeated starts unless explicitly enabled with this function. See this thread for more details: http://www.raspberrypi.org/forums/viewtopic.php?f=44&t=15840 """ plat = Platform.platform_detect() if plat == Platform.RASPBERRY_PI and os.path.exists('/sys/module/i2c_bcm2708/parameters/combined'): # On the Raspberry Pi there is a bug where register reads don't send a # repeated start condition like the kernel smbus I2C driver functions # define. As a workaround this bit in the BCM2708 driver sysfs tree can # be changed to enable I2C repeated starts. 
subprocess.check_call('chmod 666 /sys/module/i2c_bcm2708/parameters/combined', shell=True) subprocess.check_call('echo -n 1 > /sys/module/i2c_bcm2708/parameters/combined', shell=True) # Other platforms are a no-op because they (presumably) have the correct # behavior and send repeated starts. class Device(object): """Class for communicating with an I2C device using the adafruit-pureio pure python smbus library, or other smbus compatible I2C interface. Allows reading and writing 8-bit, 16-bit, and byte array values to registers on the device.""" def __init__(self, address, busnum, i2c_interface=None): """Create an instance of the I2C device at the specified address on the specified I2C bus number.""" self._address = address if i2c_interface is None: # Use pure python I2C interface if none is specified. import Adafruit_PureIO.smbus self._bus = Adafruit_PureIO.smbus.SMBus(busnum) else: # Otherwise use the provided class to create an smbus interface. self._bus = i2c_interface(busnum) self._logger = logging.getLogger('Adafruit_I2C.Device.Bus.{0}.Address.{1:#0X}' \ .format(busnum, address)) def writeRaw8(self, value): """Write an 8-bit value on the bus (without register).""" value = value & 0xFF self._bus.write_byte(self._address, value) self._logger.debug("Wrote 0x%02X", value) def write8(self, register, value): """Write an 8-bit value to the specified register.""" value = value & 0xFF self._bus.write_byte_data(self._address, register, value) self._logger.debug("Wrote 0x%02X to register 0x%02X", value, register) def write16(self, register, value): """Write a 16-bit value to the specified register.""" value = value & 0xFFFF self._bus.write_word_data(self._address, register, value) self._logger.debug("Wrote 0x%04X to register pair 0x%02X, 0x%02X", value, register, register+1) def writeList(self, register, data): """Write bytes to the specified register.""" self._bus.write_i2c_block_data(self._address, register, data) self._logger.debug("Wrote to register 0x%02X: %s", register, data) def readList(self, register, length): """Read a length number of bytes from the specified register. Results will be returned as a bytearray.""" results = self._bus.read_i2c_block_data(self._address, register, length) self._logger.debug("Read the following from register 0x%02X: %s", register, results) return results def readRaw8(self): """Read an 8-bit value on the bus (without register).""" result = self._bus.read_byte(self._address) & 0xFF self._logger.debug("Read 0x%02X", result) return result def readU8(self, register): """Read an unsigned byte from the specified register.""" result = self._bus.read_byte_data(self._address, register) & 0xFF self._logger.debug("Read 0x%02X from register 0x%02X", result, register) return result def readS8(self, register): """Read a signed byte from the specified register.""" result = self.readU8(register) if result > 127: result -= 256 return result def readU16(self, register, little_endian=True): """Read an unsigned 16-bit value from the specified register, with the specified endianness (default little endian, or least significant byte first).""" result = self._bus.read_word_data(self._address,register) & 0xFFFF self._logger.debug("Read 0x%04X from register pair 0x%02X, 0x%02X", result, register, register+1) # Swap bytes if using big endian because read_word_data assumes little # endian on ARM (little endian) systems. 
if not little_endian: result = ((result << 8) & 0xFF00) + (result >> 8) return result def readS16(self, register, little_endian=True): """Read a signed 16-bit value from the specified register, with the specified endianness (default little endian, or least significant byte first).""" result = self.readU16(register, little_endian) if result > 32767: result -= 65536 return result def readU16LE(self, register): """Read an unsigned 16-bit value from the specified register, in little endian byte order.""" return self.readU16(register, little_endian=True) def readU16BE(self, register): """Read an unsigned 16-bit value from the specified register, in big endian byte order.""" return self.readU16(register, little_endian=False) def readS16LE(self, register): """Read a signed 16-bit value from the specified register, in little endian byte order.""" return self.readS16(register, little_endian=True) def readS16BE(self, register): """Read a signed 16-bit value from the specified register, in big endian byte order.""" return self.readS16(register, little_endian=False)
adafruit/Adafruit_Python_GPIO
Adafruit_GPIO/I2C.py
Python
mit
9,083
#!/usr/bin/env python3 # -*-coding:UTF-8 -* import os import sys import json import redis from TorSplashCrawler import TorSplashCrawler sys.path.append(os.path.join(os.environ['AIL_BIN'], 'lib/')) import ConfigLoader import crawlers if __name__ == '__main__': if len(sys.argv) != 2: print('usage:', 'tor_crawler.py', 'uuid') exit(1) config_loader = ConfigLoader.ConfigLoader() redis_cache = config_loader.get_redis_conn("Redis_Cache") config_loader = None # get crawler config key uuid = sys.argv[1] # get configs crawler_json = json.loads(redis_cache.get('crawler_request:{}'.format(uuid))) splash_url = crawler_json['splash_url'] service_type = crawler_json['service_type'] url = crawler_json['url'] domain = crawler_json['domain'] port = crawler_json['port'] original_item = crawler_json['item'] crawler_options = crawler_json['crawler_options'] date = crawler_json['date'] requested_mode = crawler_json['requested'] if crawler_options['cookiejar_uuid']: cookies = crawlers.load_crawler_cookies(crawler_options['cookiejar_uuid'], domain, crawler_type=service_type) else: cookies = [] redis_cache.delete('crawler_request:{}'.format(uuid)) try: crawler = TorSplashCrawler(splash_url, crawler_options) crawler.crawl(splash_url, service_type, crawler_options, date, requested_mode, url, domain, port, cookies, original_item) except Exception as e: print(e) print(e, file=sys.stderr)
CIRCL/AIL-framework
bin/torcrawler/tor_crawler.py
Python
agpl-3.0
1,546
import time
import inspect


def log(*objects, sep=' ', end='\n'):
    """Function used to log activities to console. Basically print() on steroids."""
    callerframerecord = inspect.stack()[1]
    frame = callerframerecord[0]
    info = inspect.getframeinfo(frame)
    # Keep only the last path component of the caller's filename.
    filename = info.filename.split('/')[-1]
    func = info.function
    lineno = info.lineno
    msg = time.strftime("%H:%M:%S") + "\t"
    msg += filename + ": " + func + ": " + str(lineno) + ":\t"
    for obj in objects:
        msg += str(obj) + sep
    msg = msg.rstrip(sep)
    print(msg, end=end)
Ankhee/steam-games-graph
scripts/utils.py
Python
gpl-3.0
618
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import date from datetime import timedelta from pathlib import Path from pprint import pformat import fileinput import json import logging import os import shutil import sys from google.cloud import bigquery import pytest from clouddq.integration.bigquery.bigquery_client import BigQueryClient from clouddq.main import main from clouddq.runners.dbt.dbt_runner import DbtRunner from clouddq.runners.dbt.dbt_utils import get_dbt_invocation_id from clouddq.utils import working_directory logger = logging.getLogger(__name__) class TestDqAdvancedRules: @pytest.fixture(scope="session") def client(self): """Get BigQuery Client using discovered ADC""" client = BigQueryClient() yield client client.close_connection() @pytest.fixture(scope="session") def prepare_test_input_data( self, client, test_data, gcp_project_id, gcp_dataplex_bigquery_dataset_id ): input_files_csv = ["accuracy_check_distribution_not_ok.csv", "accuracy_check_distribution_ok.csv", "accuracy_check_simple.csv", "completeness_check_not_ok.csv", "completeness_check_ok.csv", "conformity_check_not_ok.csv", "conformity_check_ok.csv", "different_volumes_per_period.csv", "ingestion_day_level.csv", "ingestion_month_level.csv", "ingestion_timestamp_level.csv", "reference_check_not_ok.csv", "reference_check_ok.csv", "reference_data.csv", "reference_data_subquery.csv", "reference_data_subquery2.csv", "uniqueness_check_not_ok.csv", "uniqueness_check_ok.csv"] input_files_json = ["complex_rules_not_ok.json", "complex_rules_ok.json", "reference_check_subquery2_not_ok.json", "reference_check_subquery2_ok.json", "reference_check_subquery_not_ok.json", "reference_check_subquery_ok.json"] # check if the data files exist, if not, ignore - data has been loaded offline if not os.path.exists(test_data / "advanced_rules"): return client = client.get_connection() # override some dates (for the ingestion check - it's looking backwards from the current date) # fileinput supports inline editing, it outputs the stdout to the file with fileinput.FileInput(test_data / "advanced_rules/ingestion_day_level.csv", inplace=True, backup='.bak') as file: day1 = (date.today() - timedelta(days=10)).strftime("%Y-%m-%d") day2 = (date.today() - timedelta(days=11)).strftime("%Y-%m-%d") for line in file: line = line.replace("2021-12-15", day1) line = line.replace("2021-12-14", day2) print(line, end='') with fileinput.FileInput(test_data / "advanced_rules/ingestion_month_level.csv", inplace=True, backup='.bak') as file: today = date.today() first = today.replace(day=1) lastMonth = first - timedelta(days=1) for line in file: line = line.replace("202111", lastMonth.strftime("%Y%m")) print(line, end='') with fileinput.FileInput(test_data / "advanced_rules/ingestion_timestamp_level.csv", inplace=True, backup='.bak') as file: today = date.today() first = today.replace(day=1) lastMonth = first - timedelta(days=1) for line in file: line = line.replace("2021-12", lastMonth.strftime("%Y-%m")) print(line, end='') for 
filename in input_files_csv + input_files_json: with open(test_data / f"advanced_rules/{filename}", "rb") as source_file: table_id = f"{gcp_project_id}.{gcp_dataplex_bigquery_dataset_id}.{os.path.splitext(filename)[0]}" file_format = os.path.splitext(filename)[1][1:] job_config = bigquery.LoadJobConfig(source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON if file_format == "json" else bigquery.SourceFormat.CSV, autodetect=True, write_disposition="WRITE_TRUNCATE") job = client.load_table_from_file(source_file, table_id, job_config=job_config) job.result() # Waits for the job to complete. table = client.get_table(table_id) # Make an API request. logger.info(f"Loaded {table.num_rows} rows and {len(table.schema)} columns to {table_id}") @pytest.fixture(scope="session") def create_expected_results_table( self, client, test_resources, gcp_project_id, target_bq_result_dataset_name, target_bq_result_table_name): client = client.get_connection() table_id = ( f"{gcp_project_id}.{target_bq_result_dataset_name}." f"{target_bq_result_table_name}_advanced_rules_expected" ) job_config = bigquery.LoadJobConfig( source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON, schema=[ bigquery.SchemaField("null_count", "INTEGER"), bigquery.SchemaField("failed_count", "INTEGER"), bigquery.SchemaField("complex_rule_validation_success_flag", "BOOLEAN"), bigquery.SchemaField("rows_validated", "INTEGER"), bigquery.SchemaField("progress_watermark", "BOOLEAN"), bigquery.SchemaField("rule_id", "STRING"), bigquery.SchemaField("complex_rule_validation_errors_count", "INTEGER"), bigquery.SchemaField("metadata_json_string", "STRING"), bigquery.SchemaField("column_id", "STRING"), bigquery.SchemaField("dimension", "STRING"), bigquery.SchemaField("success_count", "INTEGER"), bigquery.SchemaField("rule_binding_id", "STRING"), ], write_disposition="WRITE_TRUNCATE") with open(test_resources / "dq_advanced_rules_expected_results.json", "rb") as source_file: json_data = json.loads(source_file.read()) job = client.load_table_from_json(json_data, table_id, job_config=job_config) job.result() # Waits for the job to complete. table = client.get_table(table_id) # Make an API request. 
logger.info(f"Loaded {table.num_rows} rows and {len(table.schema)} columns to {table_id}") return table_id def test_advanced_dq_rules( self, runner, temp_configs_from_dq_advanced_rules_configs, gcp_application_credentials, gcp_project_id, gcp_bq_dataset, gcp_bq_region, target_bq_result_dataset_name, target_bq_result_table_name, tmp_path, client, gcp_impersonation_credentials, gcp_sa_key, create_expected_results_table, prepare_test_input_data, source_dq_advanced_rules_configs_path, test_resources, caplog, ): caplog.set_level(logging.INFO, logger="clouddq") try: temp_dir = Path(tmp_path).joinpath("cloud_dq_working_dir") temp_dir.mkdir(parents=True) with working_directory(temp_dir): logger.info(f"test_last_modified_in_dq_summary {gcp_application_credentials}") target_table = f"{gcp_project_id}.{target_bq_result_dataset_name}.{target_bq_result_table_name}" args = [ "ALL", f"{temp_configs_from_dq_advanced_rules_configs}", f"--gcp_project_id={gcp_project_id}", f"--gcp_bq_dataset_id={gcp_bq_dataset}", f"--gcp_region_id={gcp_bq_region}", f"--target_bigquery_summary_table={target_table}", "--enable_experimental_bigquery_entity_uris" ] result = runner.invoke(main, args) logger.info(result.output) assert result.exit_code == 0 # Prepare dbt runtime dbt_runner = DbtRunner( dbt_path=None, dbt_profiles_dir=None, environment_target="Dev", gcp_project_id=gcp_project_id, gcp_region_id=gcp_bq_region, gcp_bq_dataset_id=gcp_bq_dataset, gcp_service_account_key_path=gcp_sa_key, gcp_impersonation_credentials=gcp_impersonation_credentials, ) dbt_path = dbt_runner.get_dbt_path() invocation_id = get_dbt_invocation_id(dbt_path) logger.info(f"Dbt invocation id is: {invocation_id}") # Test the DQ expected results sql = f""" WITH validation_errors AS ( SELECT rule_binding_id, rule_id, column_id, dimension, metadata_json_string, progress_watermark, rows_validated, complex_rule_validation_errors_count, complex_rule_validation_success_flag, success_count, failed_count, null_count FROM `{gcp_project_id}.{target_bq_result_dataset_name}.{target_bq_result_table_name}` WHERE invocation_id='{invocation_id}' EXCEPT DISTINCT SELECT rule_binding_id, rule_id, column_id, dimension, metadata_json_string, progress_watermark, rows_validated, complex_rule_validation_errors_count, complex_rule_validation_success_flag, success_count, failed_count, null_count FROM `{create_expected_results_table}` ) SELECT TO_JSON_STRING(validation_errors) FROM validation_errors; """ logger.info(f"SQL query is: {sql}") query_job = client.execute_query(sql) results = query_job.result() logger.info("Query done") rows = list(results) logger.info(f"Query execution returned {len(rows)} rows") if len(rows): logger.warning( "Rows with values not matching the expected " "content in 'tests/resources/dq_advanced_rules_expected_results.csv':" ) for row in rows: record = json.loads(str(row[0])) logger.warning(f"\n{pformat(record)}") failed_rows = [json.loads(row[0]) for row in rows] failed_rows_rule_binding_ids = [row['rule_binding_id'] for row in failed_rows] failed_rows_rule_ids = [row['rule_id'] for row in failed_rows] with open(test_resources / "dq_advanced_rules_expected_results.json", "rb") as source_file: expected_json = [] json_data = json.loads(source_file.read()) for record in json_data: if record['rule_binding_id'] not in failed_rows_rule_binding_ids: continue if record['rule_id'] not in failed_rows_rule_ids: continue expected_json.append(record) assert failed_rows == expected_json finally: shutil.rmtree(temp_dir) if __name__ == "__main__": raise 
SystemExit(pytest.main([__file__, '-vv', '-rP', '-n', 'auto'] + sys.argv[1:]))
GoogleCloudPlatform/cloud-data-quality
tests/integration/test_advanced_dq_rules.py
Python
apache-2.0
12,219
from distutils.core import setup from distutils.extension import Extension from Cython.Distutils import build_ext src = "../libturboparser/" setup(cmdclass={'build_ext': build_ext}, ext_modules=[Extension("turboparser", ["turbo_parser.pyx"], language="c++", include_dirs=["../src/semantic_parser", "../src/parser", "../src/entity_recognizer/", "../src/tagger/", "../src/sequence/", "../src/classifier/", "../src/util", "../deps/local/include/"], library_dirs=[src, "../deps/local/lib/"], libraries=["turboparser", "gflags", "glog", "ad3"])])
PhdDone/TurboParser
python/setup.py
Python
lgpl-3.0
556
# Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for test_utils.""" import lingvo.compat as tf from lingvo.core import test_utils class TestUtilsTest(test_utils.TestCase): def testReplaceGoldenSingleFloat(self): old_line = ' CompareToGoldenSingleFloat(self, 1.489712, vs[0])\n' expected = ' CompareToGoldenSingleFloat(self, 1.000000, vs[0])\n' actual = test_utils.ReplaceGoldenSingleFloat(old_line, 1.0) self.assertEqual(expected, actual) old_line = ('test_utils.CompareToGoldenSingleFloat(self, -2.e-3, vs[0])' ' # pylint: disable=line-too-long\n') expected = ('test_utils.CompareToGoldenSingleFloat(self, 1.000000, vs[0])' ' # pylint: disable=line-too-long\n') actual = test_utils.ReplaceGoldenSingleFloat(old_line, 1.0) self.assertEqual(expected, actual) def CompareToGoldenSingleFloat(self, unused_v1, v2): return test_utils.ReplaceGoldenStackAnalysis(v2) def testReplaceGoldenStackAnalysis(self): v2 = 2.0 result = TestUtilsTest.CompareToGoldenSingleFloat(self, 1.0, v2) self.assertTrue(result[0].endswith('test_utils_test.py')) old_line = (' result = TestUtilsTest.CompareToGoldenSingleFloat(' 'self, 1.0, v2)\n') new_line = (' result = TestUtilsTest.CompareToGoldenSingleFloat(' 'self, 2.000000, v2)\n') self.assertEqual(old_line, result[2]) self.assertEqual(new_line, result[3]) if __name__ == '__main__': tf.test.main()
tensorflow/lingvo
lingvo/core/test_utils_test.py
Python
apache-2.0
2,160
import sys, os import re import unittest import traceback import pywin32_testutil # A list of demos that depend on user-interface of *any* kind. Tests listed # here are not suitable for unattended testing. ui_demos = """GetSaveFileName print_desktop win32cred_demo win32gui_demo win32gui_dialog win32gui_menu win32gui_taskbar win32rcparser_demo winprocess win32console_demo win32gui_devicenotify NetValidatePasswordPolicy""".split() # Other demos known as 'bad' (or at least highly unlikely to work) # cerapi: no CE module is built (CE via pywin32 appears dead) # desktopmanager: hangs (well, hangs for 60secs or so...) bad_demos = "cerapi desktopmanager win32comport_demo".split() argvs = { "rastest": ("-l",), } # re to pull apart an exception line into the exception type and the args. re_exception = re.compile("([a-zA-Z0-9_.]*): (.*)$") def find_exception_in_output(data): have_traceback = False for line in data.splitlines(): line = line.decode('ascii') # not sure what the correct encoding is... if line.startswith("Traceback ("): have_traceback = True continue if line.startswith(" "): continue if have_traceback: # first line not starting with a space since the traceback. # must be the exception! m = re_exception.match(line) if m: exc_type, args = m.groups() # get hacky - get the *real* exception object from the name. bits = exc_type.split(".", 1) if len(bits) > 1: mod = __import__(bits[0]) exc = getattr(mod, bits[1]) else: # probably builtin exc = eval(bits[0]) else: # hrm - probably just an exception with no args try: exc = eval(line.strip()) args = "()" except: return None # try and turn the args into real args. try: args = eval(args) except: pass if not isinstance(args, tuple): args = (args,) # try and instantiate the exception. try: ret = exc(*args) except: ret = None return ret # apparently not - keep looking... have_traceback = False class TestRunner: def __init__(self, argv): self.argv = argv def __call__(self): import subprocess p = subprocess.Popen(self.argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, _ = p.communicate() rc = p.returncode if rc: base = os.path.basename(self.argv[1]) # See if we can detect and reconstruct an exception in the output. reconstituted = find_exception_in_output(output) if reconstituted is not None: raise reconstituted raise AssertionError("%s failed with exit code %s. Output is:\n%s" % (base, rc, output)) def get_demo_tests(): import win32api ret = [] demo_dir = os.path.abspath(os.path.join(os.path.dirname(win32api.__file__), "Demos")) assert os.path.isdir(demo_dir), demo_dir for name in os.listdir(demo_dir): base, ext = os.path.splitext(name) if ext != ".py" or base in ui_demos or base in bad_demos: continue argv = (sys.executable, os.path.join(demo_dir, base+".py")) + \ argvs.get(base, ()) ret.append(unittest.FunctionTestCase(TestRunner(argv), description="win32/demos/" + name)) return ret def import_all(): # Some hacks for import order - dde depends on win32ui try: import win32ui except ImportError: pass # 'what-ev-a....' 
import win32api dir = os.path.dirname(win32api.__file__) num = 0 is_debug = os.path.basename(win32api.__file__).endswith("_d") for name in os.listdir(dir): base, ext = os.path.splitext(name) if (ext==".pyd") and \ name != "_winxptheme.pyd" and \ (is_debug and base.endswith("_d") or \ not is_debug and not base.endswith("_d")): try: __import__(base) except: print("FAILED to import", name) raise num += 1 def suite(): # Loop over all .py files here, except me :) try: me = __file__ except NameError: me = sys.argv[0] me = os.path.abspath(me) files = os.listdir(os.path.dirname(me)) suite = unittest.TestSuite() suite.addTest(unittest.FunctionTestCase(import_all)) for file in files: base, ext = os.path.splitext(file) if ext=='.py' and os.path.basename(me) != file: try: mod = __import__(base) except: print("FAILED to import test module %r" % base) traceback.print_exc() continue if hasattr(mod, "suite"): test = mod.suite() else: test = unittest.defaultTestLoader.loadTestsFromModule(mod) suite.addTest(test) for test in get_demo_tests(): suite.addTest(test) return suite class CustomLoader(pywin32_testutil.TestLoader): def loadTestsFromModule(self, module): return self.fixupTestsForLeakTests(suite()) if __name__=='__main__': pywin32_testutil.testmain(testLoader=CustomLoader())
huguesv/PTVS
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/test/testall.py
Python
apache-2.0
5,687
from django.shortcuts import render def list(request): return render(request, 'qa/list.html') def form(request): return render(request, 'qa/form.html')
bugness/ask-and-answer
qa/views.py
Python
mit
164
#### #### Give a report on the "sanity" of the users and groups YAML #### metadata files. #### #### Example usage to analyze the usual suspects: #### python3 sanity-check-users-and-groups.py --help #### Get report of current problems: #### python3 ./scripts/sanity-check-users-and-groups.py --users metadata/users.yaml --groups metadata/groups.yaml #### Attempt to repair file (note that we go through json2yaml as libyaml output does not seem compatible with kwalify): #### python3 ./scripts/sanity-check-users-and-groups.py --users metadata/users.yaml --groups metadata/groups.yaml --repair --output /tmp/output.json && json2yaml --depth 10 /tmp/output.json > /tmp/users.yaml #### Check new yaml: #### kwalify -E -f metadata/users.schema.yaml /tmp/users.yaml #### Run report on new yaml. #### reset && python3 ./scripts/sanity-check-users-and-groups.py --users /tmp/users.yaml --groups metadata/groups.yaml import sys import argparse import logging import yaml import json ## Logger basic setup. logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger('sanity') LOGGER.setLevel(logging.WARNING) ## Make sure we exit in a way that will get Jenkins's attention. DIED_SCREAMING_P = False def die_screaming(string): """ Die and take our toys home. """ global DIED_SCREAMING_P LOGGER.error(string) DIED_SCREAMING_P = True #sys.exit(1) def main(): ## Deal with incoming. parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-v', '--verbose', action='store_true', help='More verbose output') parser.add_argument('-u', '--users', help='The users.yaml file to act on') parser.add_argument('-g', '--groups', help='The groups.yaml file to act on') parser.add_argument("-r", "--repair", action="store_true", help="Attempt to repair groups and update old permissions") parser.add_argument("-o", "--output", help="The file to output internal structure to (if repairing)") args = parser.parse_args() if args.verbose: LOGGER.setLevel(logging.INFO) LOGGER.info('Verbose: on') ## Ensure targets. if not args.users: die_screaming('need a users argument') LOGGER.info('Will operate on users: ' + args.users) if not args.groups: die_screaming('need a groups argument') LOGGER.info('Will operate on groups: ' + args.groups) ## Read. users = None with open(args.users) as mhandle: users = yaml.safe_load(mhandle.read()) groups_linear = None with open(args.groups) as mhandle: groups_linear = yaml.safe_load(mhandle.read()) ## Switch linear groups to lookup by URI. groups_lookup = {} for group in groups_linear: groups_lookup[group['id']] = group['label'] violations = { "uri": [], "groups": [], } ## Cycle through users and see if we find any violations. for index, user in enumerate(users): nick = user.get('nickname', '???') ## Update old authorizations type. if args.repair: if user.get("authorizations", {}).get("noctua-go", False): print('REPAIR?: Update perms for ' + nick) auths = user["authorizations"]["noctua-go"] del user["authorizations"]["noctua-go"] # delete old way user["authorizations"]["noctua"] = { "go": auths } users[index] = user # save new back into list ## Does the user have noctua perms? if user.get('authorizations', False): auth = user.get('authorizations', {}) if auth.get('noctua-go', False) or \ (auth.get('noctua', False) and auth['noctua'].get('go', False)): #print('Has perms: ' + user.get('nickname', '???')) ## 1: If so, do they have a URI? 
if not user.get('uri', False): die_screaming(user.get('nickname', '???') +\ ' has no "uri"') #print(nick + ' has no "uri"') violations["uri"].append(nick) else: ## 2: Is it an ORCID? if user.get('uri', 'NIL').find('orcid') == -1: die_screaming(user.get('nickname', '???') +\ ' "uri" is not an ORCID.') #print(nick + ' "uri" is not an ORCID.') violations["uri"].append(nick) ## 3: If so, do they have a populated groups? if not user.get('groups', False) or len(user["groups"]) == 0: die_screaming(user.get('nickname', '???') +\ ' has no "groups"') #print(nick + ' has no "groups"') if user.get("organization", False): org = user["organization"] print(nick + " could try org {}".format(org)) matching_groups = list(filter(lambda g: org == g["label"] or org == g["shorthand"], groups_linear)) if len(matching_groups) > 0: print("REPAIR?: Use group: {}".format(matching_groups[0]["id"])) if args.repair: user["groups"] = [matching_groups[0]["id"]] users[index] = user else: violations["groups"].append(nick) else: ## 4: If so, are all entries in groups? for gid in user.get('groups'): if not groups_lookup.get(gid, False): die_screaming(user.get('nickname', '???') +\ ' has mistaken group entry: ' + gid) #print(nick + ' has mistaken group entry: ' + gid) violates_both = set(violations["uri"]).intersection(violations["groups"]) just_uri = set(violations["uri"]).difference(violates_both) just_groups = set(violations["groups"]).difference(violates_both) ## Check privs. for index, user in enumerate(users): if user["nickname"] in just_uri or user["nickname"] in just_groups: # If we have an auth with noctua-go with allow-edit set to True if user.get("authorizations", {}).get("noctua", {}).get("go", {}).get("allow-edit", False): print("REPAIR?: Revoke {} noctua-go edit privileges.".format(user["nickname"])) if args.repair: del user["authorizations"] users[index] = user print("\nNo URI, or no ORCID:") print("===================") print("\n".join(just_uri)) print("\nNo Groups:") print("===================") print("\n".join(just_groups)) print("\nBoth Bad:") print("===================") print("\n".join(violates_both)) #print(json.dumps(users)) #print(yaml.dump(users, default_flow_style=False)) #yaml.dump(data, default_flow_style=False) if args.output: with open(args.output, 'w+') as fhandle: fhandle.write(json.dumps(users, sort_keys=True, indent=4)) ## TODO: implement hard checks above later. if DIED_SCREAMING_P: print('Errors happened, alert the sheriff.') sys.exit(1) else: print('Non-failing run.') ## You saw it coming... if __name__ == '__main__': main()
geneontology/go-site
scripts/sanity-check-users-and-groups.py
Python
bsd-3-clause
7,704
from time import sleep from scrapy.downloadermiddlewares.retry import RetryMiddleware from scrapers.manolo_scraper import settings class ProxyMiddleware(object): def process_request(self, request, spider): try: proxy = settings.HTTP_PROXY request.meta['proxy'] = proxy except Exception as error: print(error) class CustomRetryMiddleware(RetryMiddleware): def process_exception(self, request, exception, spider): if ( isinstance(exception, self.EXCEPTIONS_TO_RETRY) and not request.meta.get('dont_retry', False) ): retry = request.meta.get('retry') sleeping = 5 * retry * retry print(f'sleeping {sleeping}, {retry}, {exception}, {self.max_retry_times}') request.meta['retry'] = retry + 1 sleep(sleeping) return self._retry(request, exception, spider)
aniversarioperu/django-manolo
scrapers/manolo_scraper/middlewares.py
Python
bsd-3-clause
928
import sys sys.path.insert(1, "../../") import h2o, tests def demo_gbm(): h2o.demo(func="gbm", interactive=False, test=True) if __name__ == "__main__": tests.run_test(sys.argv, demo_gbm)
brightchen/h2o-3
h2o-py/tests/testdir_demos/pyunit_gbm_demo.py
Python
apache-2.0
198
#!/usr/bin/python # -*- Mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details: # # Copyright (C) 2008 Novell, Inc. # Copyright (C) 2009 Red Hat, Inc. # import sys, dbus, time, os, string, subprocess, socket DBUS_INTERFACE_PROPERTIES='org.freedesktop.DBus.Properties' MM_DBUS_SERVICE='org.freedesktop.ModemManager' MM_DBUS_PATH='/org/freedesktop/ModemManager' MM_DBUS_INTERFACE='org.freedesktop.ModemManager' MM_DBUS_INTERFACE_MODEM='org.freedesktop.ModemManager.Modem' MM_DBUS_INTERFACE_MODEM_CDMA='org.freedesktop.ModemManager.Modem.Cdma' MM_DBUS_INTERFACE_MODEM_GSM_CARD='org.freedesktop.ModemManager.Modem.Gsm.Card' MM_DBUS_INTERFACE_MODEM_GSM_NETWORK='org.freedesktop.ModemManager.Modem.Gsm.Network' MM_DBUS_INTERFACE_MODEM_SIMPLE='org.freedesktop.ModemManager.Modem.Simple' def get_cdma_band_class(band_class): if band_class == 1: return "800MHz" elif band_class == 2: return "1900MHz" else: return "Unknown" def get_reg_state(state): if state == 1: return "registered (roaming unknown)" elif state == 2: return "registered on home network" elif state == 3: return "registered on roaming network" else: return "unknown" def cdma_inspect(proxy, dump_private): cdma = dbus.Interface(proxy, dbus_interface=MM_DBUS_INTERFACE_MODEM_CDMA) esn = "<private>" if dump_private: try: esn = cdma.GetEsn() except dbus.exceptions.DBusException: esn = "<unavailable>" print "" print "ESN: %s" % esn try: (cdma_1x_state, evdo_state) = cdma.GetRegistrationState() print "1x State: %s" % get_reg_state (cdma_1x_state) print "EVDO State: %s" % get_reg_state (evdo_state) except dbus.exceptions.DBusException, e: print "Error reading registration state: %s" % e try: quality = cdma.GetSignalQuality() print "Signal quality: %d" % quality except dbus.exceptions.DBusException, e: print "Error reading signal quality: %s" % e try: info = cdma.GetServingSystem() print "Class: %s" % get_cdma_band_class(info[0]) print "Band: %s" % info[1] print "SID: %d" % info[2] except dbus.exceptions.DBusException, e: print "Error reading serving system: %s" % e def cdma_connect(proxy, user, password): # Modem.Simple interface simple = dbus.Interface(proxy, dbus_interface=MM_DBUS_INTERFACE_MODEM_SIMPLE) try: simple.Connect({'number':"#777"}, timeout=92) print "\nConnected!" 
return True except Exception, e: print "Error connecting: %s" % e return False def get_gsm_network_mode(modem): mode = modem.GetNetworkMode() if mode == 0x0: mode = "Unknown" elif mode == 0x1: mode = "Any" elif mode == 0x2: mode = "GPRS" elif mode == 0x4: mode = "EDGE" elif mode == 0x8: mode = "UMTS" elif mode == 0x10: mode = "HSDPA" elif mode == 0x20: mode = "2G Preferred" elif mode == 0x40: mode = "3G Preferred" elif mode == 0x80: mode = "2G Only" elif mode == 0x100: mode = "3G Only" elif mode == 0x200: mode = "HSUPA" elif mode == 0x400: mode = "HSPA" else: mode = "(Unknown)" print "Mode: %s" % mode def get_gsm_band(modem): band = modem.GetBand() if band == 0x0: band = "Unknown" elif band == 0x1: band = "Any" elif band == 0x2: band = "EGSM (900 MHz)" elif band == 0x4: band = "DCS (1800 MHz)" elif band == 0x8: band = "PCS (1900 MHz)" elif band == 0x10: band = "G850 (850 MHz)" elif band == 0x20: band = "U2100 (WCSMA 2100 MHZ, Class I)" elif band == 0x40: band = "U1700 (WCDMA 3GPP UMTS1800 MHz, Class III)" elif band == 0x80: band = "17IV (WCDMA 3GPP AWS 1700/2100 MHz, Class IV)" elif band == 0x100: band = "U800 (WCDMA 3GPP UMTS800 MHz, Class VI)" elif band == 0x200: band = "U850 (WCDMA 3GPP UMT850 MHz, Class V)" elif band == 0x400: band = "U900 (WCDMA 3GPP UMTS900 MHz, Class VIII)" elif band == 0x800: band = "U17IX (WCDMA 3GPP UMTS MHz, Class IX)" else: band = "(invalid)" print "Band: %s" % band def gsm_inspect(proxy, dump_private, do_scan): # Gsm.Card interface card = dbus.Interface(proxy, dbus_interface=MM_DBUS_INTERFACE_MODEM_GSM_CARD) imei = "<private>" imsi = "<private>" if dump_private: try: imei = card.GetImei() except dbus.exceptions.DBusException: imei = "<unavailable>" try: imsi = card.GetImsi() except dbus.exceptions.DBusException: imsi = "<unavailable>" print "IMEI: %s" % imei print "IMSI: %s" % imsi # Gsm.Network interface net = dbus.Interface(proxy, dbus_interface=MM_DBUS_INTERFACE_MODEM_GSM_NETWORK) try: quality = net.GetSignalQuality() print "Signal quality: %d" % quality except dbus.exceptions.DBusException, e: print "Error reading signal quality: %s" % e if not do_scan: return print "Scanning..." 
try: results = net.Scan(timeout=120) except dbus.exceptions.DBusException, e: print "Error scanning: %s" % e results = {} for r in results: status = r['status'] if status == "1": status = "available" elif status == "2": status = "current" elif status == "3": status = "forbidden" else: status = "(Unknown)" access_tech = "" try: access_tech_num = r['access-tech'] if access_tech_num == "0": access_tech = "(GSM)" elif access_tech_num == "1": access_tech = "(Compact GSM)" elif access_tech_num == "2": access_tech = "(UMTS)" elif access_tech_num == "3": access_tech = "(EDGE)" elif access_tech_num == "4": access_tech = "(HSDPA)" elif access_tech_num == "5": access_tech = "(HSUPA)" elif access_tech_num == "6": access_tech = "(HSPA)" except KeyError: pass if r.has_key('operator-long') and len(r['operator-long']): print "%s: %s %s" % (r['operator-long'], status, access_tech) elif r.has_key('operator-short') and len(r['operator-short']): print "%s: %s %s" % (r['operator-short'], status, access_tech) else: print "%s: %s %s" % (r['operator-num'], status, access_tech) def gsm_connect(proxy, apn, user, password): # Modem.Simple interface simple = dbus.Interface(proxy, dbus_interface=MM_DBUS_INTERFACE_MODEM_SIMPLE) try: opts = {'number':"*99#"} if apn is not None: opts['apn'] = apn if user is not None: opts['username'] = user if password is not None: opts['password'] = password simple.Connect(opts, timeout=120) print "\nConnected!" return True except Exception, e: print "Error connecting: %s" % e return False def pppd_find(): paths = ["/usr/local/sbin/pppd", "/usr/sbin/pppd", "/sbin/pppd"] for p in paths: if os.path.exists(p): return p return None def ppp_start(device, user, password, tmpfile): path = pppd_find() if not path: return None args = [path] args += ["nodetach"] args += ["lock"] args += ["nodefaultroute"] args += ["debug"] if user: args += ["user"] args += [user] args += ["noipdefault"] args += ["115200"] args += ["noauth"] args += ["crtscts"] args += ["modem"] args += ["usepeerdns"] args += ["ipparam"] ipparam = "" if user: ipparam += user ipparam += "+" if password: ipparam += password ipparam += "+" ipparam += tmpfile args += [ipparam] args += ["plugin"] args += ["mm-test-pppd-plugin.so"] args += [device] return subprocess.Popen(args, close_fds=True, cwd="/", env={}) def ppp_wait(p, tmpfile): i = 0 while p.poll() == None and i < 30: time.sleep(1) if os.path.exists(tmpfile): f = open(tmpfile, 'r') stuff = f.read(500) idx = string.find(stuff, "DONE") f.close() if idx >= 0: return True i += 1 return False def ppp_stop(p): import signal p.send_signal(signal.SIGTERM) p.wait() def ntop_helper(ip): ip = socket.ntohl(ip) n1 = ip >> 24 & 0xFF n2 = ip >> 16 & 0xFF n3 = ip >> 8 & 0xFF n4 = ip & 0xFF a = "%c%c%c%c" % (n1, n2, n3, n4) return socket.inet_ntop(socket.AF_INET, a) def static_start(iface, modem): (addr_num, dns1_num, dns2_num, dns3_num) = modem.GetIP4Config() addr = ntop_helper(addr_num) dns1 = ntop_helper(dns1_num) dns2 = ntop_helper(dns2_num) configure_iface(iface, addr, 0, dns1, dns2) def down_iface(iface): ip = ["ip", "addr", "flush", "dev", iface] print " ".join(ip) subprocess.call(ip) ip = ["ip", "link", "set", iface, "down"] print " ".join(ip) subprocess.call(ip) def configure_iface(iface, addr, gw, dns1, dns2): print "\n\n******************************" print "iface: %s" % iface print "addr: %s" % addr print "gw: %s" % gw print "dns1: %s" % dns1 print "dns2: %s" % dns2 ifconfig = ["ifconfig", iface, "%s/32" % addr] if gw != 0: ifconfig += ["pointopoint", gw] print " ".join(ifconfig) 
print "\n******************************\n" subprocess.call(ifconfig) def file_configure_iface(tmpfile): addr = None gw = None iface = None dns1 = None dns2 = None f = open(tmpfile, 'r') lines = f.readlines() for l in lines: if l.startswith("addr"): addr = l[len("addr"):].strip() if l.startswith("gateway"): gw = l[len("gateway"):].strip() if l.startswith("iface"): iface = l[len("iface"):].strip() if l.startswith("dns1"): dns1 = l[len("dns1"):].strip() if l.startswith("dns2"): dns2 = l[len("dns2"):].strip() f.close() configure_iface(iface, addr, gw, dns1, dns2) return iface def try_ping(iface): cmd = ["ping", "-I", iface, "-c", "4", "-i", "3", "-w", "20", "4.2.2.1"] print " ".join(cmd) retcode = subprocess.call(cmd) if retcode != 0: print "PING: failed" else: print "PING: success" dump_private = False connect = False apn = None user = None password = None do_ip = False do_scan = True x = 1 while x < len(sys.argv): if sys.argv[x] == "--private": dump_private = True elif sys.argv[x] == "--connect": connect = True elif (sys.argv[x] == "--user" or sys.argv[x] == "--username"): x += 1 user = sys.argv[x] elif sys.argv[x] == "--apn": x += 1 apn = sys.argv[x] elif sys.argv[x] == "--password": x += 1 password = sys.argv[x] elif sys.argv[x] == "--ip": do_ip = True if os.geteuid() != 0: print "You probably want to be root to use --ip" sys.exit(1) elif sys.argv[x] == "--no-scan": do_scan = False x += 1 bus = dbus.SystemBus() # Get available modems: manager_proxy = bus.get_object('org.freedesktop.ModemManager', '/org/freedesktop/ModemManager') manager_iface = dbus.Interface(manager_proxy, dbus_interface='org.freedesktop.ModemManager') modems = manager_iface.EnumerateDevices() if not modems: print "No modems found" sys.exit(1) for m in modems: connect_success = False data_device = None proxy = bus.get_object(MM_DBUS_SERVICE, m) # Properties props_iface = dbus.Interface(proxy, dbus_interface='org.freedesktop.DBus.Properties') type = props_iface.Get(MM_DBUS_INTERFACE_MODEM, 'Type') if type == 1: print "GSM modem" elif type == 2: print "CDMA modem" else: print "Invalid modem type: %d" % type print "Driver: '%s'" % (props_iface.Get(MM_DBUS_INTERFACE_MODEM, 'Driver')) print "Modem device: '%s'" % (props_iface.Get(MM_DBUS_INTERFACE_MODEM, 'MasterDevice')) data_device = props_iface.Get(MM_DBUS_INTERFACE_MODEM, 'Device') print "Data device: '%s'" % data_device # Modem interface modem = dbus.Interface(proxy, dbus_interface=MM_DBUS_INTERFACE_MODEM) try: modem.Enable(True) except dbus.exceptions.DBusException, e: print "Error enabling modem: %s" % e sys.exit(1) info = modem.GetInfo() print "Vendor: %s" % info[0] print "Model: %s" % info[1] print "Version: %s" % info[2] if type == 1: gsm_inspect(proxy, dump_private, do_scan) if connect == True: connect_success = gsm_connect(proxy, apn, user, password) elif type == 2: cdma_inspect(proxy, dump_private) if connect == True: connect_success = cdma_connect(proxy, user, password) print if connect_success and do_ip: tmpfile = "/tmp/mm-test-%d.tmp" % os.getpid() success = False try: ip_method = props_iface.Get(MM_DBUS_INTERFACE_MODEM, 'IpMethod') if ip_method == 0: # ppp p = ppp_start(data_device, user, password, tmpfile) if ppp_wait(p, tmpfile): data_device = file_configure_iface(tmpfile) success = True elif ip_method == 1: # static static_start(data_device, modem) success = True elif ip_method == 2: # dhcp pass except Exception, e: print "Error setting up IP: %s" % e if success: try_ping(data_device) print "Waiting for 30s..." time.sleep(30) print "Disconnecting..." 
try: if ip_method == 0: ppp_stop(p) try: os.remove(tmpfile) except: pass elif ip_method == 1: # static down_iface(data_device) elif ip_method == 2: # dhcp down_iface(data_device) modem.Disconnect() except Exception, e: print "Error tearing down IP: %s" % e time.sleep(5) modem.Enable(False)
miurahr/ModemManager
test/mm-test.py
Python
gpl-2.0
15,258
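The ppp helpers above hand their results to file_configure_iface() through a small key/value temp file whose format is only implied by the parser. A minimal sketch of such a file, with the keyword prefixes taken from the parser and purely illustrative values:

# Hypothetical contents of the tmpfile read by file_configure_iface();
# the prefixes (iface/addr/gateway/dns1/dns2) come from the parser above,
# the addresses are made up.
example = """iface ppp0
addr 10.64.12.5
gateway 10.64.12.1
dns1 8.8.8.8
dns2 8.8.4.4
"""

file_configure_iface() strips each prefix and hands the values to configure_iface(), which performs the actual interface setup; that is why the --ip path insists on running as root.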
from __future__ import absolute_import ########################################################################### # (C) Vrije Universiteit, Amsterdam (the Netherlands) # # # # This file is part of AmCAT - The Amsterdam Content Analysis Toolkit # # # # AmCAT is free software: you can redistribute it and/or modify it under # # the terms of the GNU Affero General Public License as published by the # # Free Software Foundation, either version 3 of the License, or (at your # # option) any later version. # # # # AmCAT is distributed in the hope that it will be useful, but WITHOUT # # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public # # License for more details. # # # # You should have received a copy of the GNU Affero General Public # # License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. # ########################################################################### from .base import * from .misc import * from .elastic import * from .scraping_celery import * try: from .private import * except ImportError: pass
aemal/westcat
settings/__init__.py
Python
agpl-3.0
1,565
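The __init__.py above builds the settings namespace from star-imports and ends with an optional .private module for machine-local overrides. A hedged sketch of what such a file could contain; the names are generic Django settings used for illustration, not values taken from AmCAT:

# settings/private.py -- hypothetical local overrides; anything defined here
# shadows the values star-imported from base/misc/elastic above.
DEBUG = False
SECRET_KEY = 'replace-me-locally'   # illustrative, not from the source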
import binascii import base64 class UnexpectedDER(Exception): pass def encode_constructed(tag, value): return chr(0xa0+tag) + encode_length(len(value)) + value def encode_integer(r): assert r >= 0 # can't support negative numbers yet h = "%x" % r if len(h)%2: h = "0" + h s = binascii.unhexlify(h) if ord(s[0]) <= 0x7f: return "\x02" + chr(len(s)) + s else: # DER integers are two's complement, so if the first byte is # 0x80-0xff then we need an extra 0x00 byte to prevent it from # looking negative. return "\x02" + chr(len(s)+1) + "\x00" + s def encode_bitstring(s): return "\x03" + encode_length(len(s)) + s def encode_octet_string(s): return "\x04" + encode_length(len(s)) + s def encode_oid(first, second, *pieces): assert first <= 2 assert second <= 39 encoded_pieces = [chr(40*first+second)] + [encode_number(p) for p in pieces] body = "".join(encoded_pieces) return "\x06" + encode_length(len(body)) + body def encode_sequence(*encoded_pieces): total_len = sum([len(p) for p in encoded_pieces]) return "\x30" + encode_length(total_len) + "".join(encoded_pieces) def encode_number(n): b128_digits = [] while n: b128_digits.insert(0, (n & 0x7f) | 0x80) n = n >> 7 if not b128_digits: b128_digits.append(0) b128_digits[-1] &= 0x7f return "".join([chr(d) for d in b128_digits]) def remove_constructed(string): s0 = ord(string[0]) if (s0 & 0xe0) != 0xa0: raise UnexpectedDER("wanted constructed tag (0xa0-0xbf), got 0x%02x" % s0) tag = s0 & 0x1f length, llen = read_length(string[1:]) body = string[1+llen:1+llen+length] rest = string[1+llen+length:] return tag, body, rest def remove_sequence(string): if not string.startswith("\x30"): raise UnexpectedDER("wanted sequence (0x30), got 0x%02x" % ord(string[0])) length, lengthlength = read_length(string[1:]) endseq = 1+lengthlength+length return string[1+lengthlength:endseq], string[endseq:] def remove_octet_string(string): if not string.startswith("\x04"): raise UnexpectedDER("wanted octetstring (0x04), got 0x%02x" % ord(string[0])) length, llen = read_length(string[1:]) body = string[1+llen:1+llen+length] rest = string[1+llen+length:] return body, rest def remove_object(string): if not string.startswith("\x06"): raise UnexpectedDER("wanted object (0x06), got 0x%02x" % ord(string[0])) length, lengthlength = read_length(string[1:]) body = string[1+lengthlength:1+lengthlength+length] rest = string[1+lengthlength+length:] numbers = [] while body: n, ll = read_number(body) numbers.append(n) body = body[ll:] n0 = numbers.pop(0) first = n0//40 second = n0-(40*first) numbers.insert(0, first) numbers.insert(1, second) return tuple(numbers), rest def remove_integer(string): if not string.startswith("\x02"): raise UnexpectedDER("wanted integer (0x02), got 0x%02x" % ord(string[0])) length, llen = read_length(string[1:]) numberbytes = string[1+llen:1+llen+length] rest = string[1+llen+length:] assert ord(numberbytes[0]) < 0x80 # can't support negative numbers yet return int(binascii.hexlify(numberbytes), 16), rest def read_number(string): number = 0 llen = 0 # base-128 big endian, with b7 set in all but the last byte while True: if llen > len(string): raise UnexpectedDER("ran out of length bytes") number = number << 7 d = ord(string[llen]) number += (d & 0x7f) llen += 1 if not d & 0x80: break return number, llen def encode_length(l): assert l >= 0 if l < 0x80: return chr(l) s = "%x" % l if len(s)%2: s = "0"+s s = binascii.unhexlify(s) llen = len(s) return chr(0x80|llen) + s def read_length(string): if not (ord(string[0]) & 0x80): # short form return (ord(string[0]) & 0x7f), 1 
# else long-form: b0&0x7f is number of additional base256 length bytes, # big-endian llen = ord(string[0]) & 0x7f if llen > len(string)-1: raise UnexpectedDER("ran out of length bytes") return int(binascii.hexlify(string[1:1+llen]), 16), 1+llen def remove_bitstring(string): if not string.startswith("\x03"): raise UnexpectedDER("wanted bitstring (0x03), got 0x%02x" % ord(string[0])) length, llen = read_length(string[1:]) body = string[1+llen:1+llen+length] rest = string[1+llen+length:] return body, rest # SEQUENCE([1, STRING(secexp), cont[0], OBJECT(curvename), cont[1], BINTSTRING) # signatures: (from RFC3279) # ansi-X9-62 OBJECT IDENTIFIER ::= { # iso(1) member-body(2) us(840) 10045 } # # id-ecSigType OBJECT IDENTIFIER ::= { # ansi-X9-62 signatures(4) } # ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { # id-ecSigType 1 } ## so 1,2,840,10045,4,1 ## so 0x42, .. .. # Ecdsa-Sig-Value ::= SEQUENCE { # r INTEGER, # s INTEGER } # id-public-key-type OBJECT IDENTIFIER ::= { ansi-X9.62 2 } # # id-ecPublicKey OBJECT IDENTIFIER ::= { id-publicKeyType 1 } # I think the secp224r1 identifier is (t=06,l=05,v=2b81040021) # secp224r1 OBJECT IDENTIFIER ::= { # iso(1) identified-organization(3) certicom(132) curve(0) 33 } # and the secp384r1 is (t=06,l=05,v=2b81040022) # secp384r1 OBJECT IDENTIFIER ::= { # iso(1) identified-organization(3) certicom(132) curve(0) 34 } def unpem(pem): d = "".join([l.strip() for l in pem.split("\n") if l and not l.startswith("-----")]) return base64.b64decode(d) def topem(der, name): b64 = base64.b64encode(der) lines = ["-----BEGIN %s-----\n" % name] lines.extend([b64[start:start+64]+"\n" for start in range(0, len(b64), 64)]) lines.append("-----END %s-----\n" % name) return "".join(lines)
kazcw/NGCCCBase
ecdsa/der.py
Python
mit
6,199
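A quick round-trip check of the helpers above (Python 2 byte strings, matching the module): two INTEGERs are packed into a SEQUENCE and parsed back out.

# Round-trip sketch using only functions defined in der.py above (Python 2).
der = encode_sequence(encode_integer(1), encode_integer(255))
body, rest = remove_sequence(der)
n1, body = remove_integer(body)
n2, body = remove_integer(body)
assert (n1, n2) == (1, 255) and body == "" and rest == ""

Note that 255 is emitted as the two bytes 0x00 0xff: the extra leading zero is the branch in encode_integer() that keeps a high first byte from being read back as a negative two's-complement value.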
import sublime, sublime_plugin import os.path import platform def compare_file_names(x, y): if platform.system() == 'Windows' or platform.system() == 'Darwin': return x.lower() == y.lower() else: return x == y class SwitchFileCommand(sublime_plugin.WindowCommand): def run(self, extensions=[]): if not self.window.active_view(): return fname = self.window.active_view().file_name() if not fname: return path = os.path.dirname(fname) base, ext = os.path.splitext(fname) start = 0 count = len(extensions) if ext != "": ext = ext[1:] for i in range(0, len(extensions)): if compare_file_names(extensions[i], ext): start = i + 1 count -= 1 break for i in range(0, count): idx = (start + i) % len(extensions) new_path = base + '.' + extensions[idx] if os.path.exists(new_path): self.window.open_file(new_path) break
koery/win-sublime
Data/Packages/Default/switch_file.py
Python
mit
1,112
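SwitchFileCommand is a WindowCommand, so it is driven by name with the extension cycle passed as an argument. A short sketch, runnable from the Sublime Text console; the extension list is illustrative:

# Dispatches to SwitchFileCommand.run(extensions=[...]).
import sublime
window = sublime.active_window()
window.run_command("switch_file", {"extensions": ["cpp", "cc", "c", "hpp", "h"]})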
# Opus/UrbanSim urban simulation software. # Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington # See opus_core/LICENSE # Utility classes that can be used to generate parse tree patterns. These # utilities take a sample expression or statement, and return a parse tree that # uses symbolic names for the nodes. You'll need to then do additional editing on # the parse tree as needed (for example, replacing a specific value with a pattern). import parser from symbol import sym_name from token import tok_name from pprint import pprint # pretty-prints a symbolic parse tree for expr (as for use with 'eval') # the symbolic names will be strings, so to use this as a constant # in some code you'll need to replace the quotes with nothing # (except for the actual string constants ...) def print_eval_tree(expr): t = parser.ast2tuple(parser.expr(expr)) # t = parser.ast2tuple(parser.suite(expr)) pprint(integer2symbolic(t)) # same as print_eval_tree, except as for use with 'exec' (for definitions, statements, etc) def print_exec_tree(expr): t = parser.ast2tuple(parser.suite(expr)) pprint(integer2symbolic(t)) # take a parse tree represented as a tuple, and return a new tuple # where the integers representing internal nodes and terminal nodes are # replaced with symbolic names def integer2symbolic(fragment): head = fragment[0] if head in sym_name: rest = tuple(map(integer2symbolic, fragment[1:])) return ('symbol.' + sym_name[head], ) + rest if head in tok_name: return ('token.' + tok_name[head], ) + fragment[1:] raise ValueError("bad value in parsetree") # examples of use: # print_eval_tree("urbansim.gridcell.population**2") # print_exec_tree("x = urbansim.gridcell.population**2") s = """def foo(x=5): y = x+3 return y*2 """ print_exec_tree(s)
apdjustino/DRCOG_Urbansim
src/opus_core/variables/utils/parse_tree_pattern_generator.py
Python
agpl-3.0
1,882
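The mapping performed by integer2symbolic() is easiest to see on a hand-built fragment using the same symbol/token tables (Python 2, matching the module):

import symbol, token

fragment = (symbol.atom, (token.NAME, 'population'))
print integer2symbolic(fragment)
# -> ('symbol.atom', ('token.NAME', 'population'))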
from __future__ import division import numpy as np class QuantileBasedSelection(object): def __init__(self, is_minimize=True, is_normalize=False): self.is_minimize = is_minimize self.is_normalize = is_normalize def __call__(self, evals, coefficient=None, xp=np): quantiles = self.compute_quantiles(evals, coefficient=None, xp=xp) weight = self.transform(quantiles, xp=xp) if self.is_normalize: weight /= xp.linalg.norm(weight, ord=1) return weight def compute_quantiles(self, evals, coefficient=None, xp=np, rank_rule='upper'): pop_size = evals.shape[0] if coefficient is None: coefficient = xp.ones(pop_size) sorter = xp.argsort(evals) if self.is_minimize is False: sorter = sorter[::-1] # set label sequentially that minimum eval = 0 , ... , maximum eval = pop_size - 1 # --- Example --- # eval = [12, 13, 10] # inv = [ 1, 2, 0] inv = xp.empty(sorter.size, dtype=xp.integer) inv[sorter] = xp.arange(sorter.size, dtype=xp.integer) arr = evals[sorter] obs = xp.r_[True, arr[1:] != arr[:-1]] dense = xp.cumsum(obs)[inv] # cumulative counts of likelihood ratio count = xp.r_[False, xp.cumsum(coefficient[sorter])] if rank_rule == 'upper': cum_llr = count[dense] elif rank_rule == 'lower': cum_llr = count[dense - 1] quantile = cum_llr / pop_size return quantile def transform(self, rank_based_vals, xp=np): raise NotImplementedError() class RankingBasedSelection(QuantileBasedSelection): def __init__(self, is_minimize=True, is_normalize=False): super(RankingBasedSelection, self).__init__(is_minimize, is_normalize) def __call__(self, evals, coefficient=None, xp=np,): ranking = self.compute_ranking(evals, coefficient=coefficient, xp=xp) weight = self.transform(ranking, xp=xp) if self.is_minimize: weight /= xp.linalg.norm(weight, ord=1) return weight def compute_ranking(self, evals, coefficient=None, xp=np): return self.compute_quantiles(evals, coefficient=coefficient, xp=xp) * len(evals)
satuma777/evoltier
evoltier/weight.py
Python
gpl-3.0
2,268
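transform() is deliberately left abstract above; a concrete subclass only has to map quantiles (or rankings) to weights. A hedged sketch with a made-up truncation rule, not one shipped with the package:

import numpy as np

class TopHalfSelection(QuantileBasedSelection):
    # Hypothetical transform: weight the best half of the population equally.
    def transform(self, quantiles, xp=np):
        return (quantiles <= 0.5).astype(float)

sel = TopHalfSelection(is_minimize=True, is_normalize=True)
print(sel(np.array([3.0, 1.0, 2.0, 4.0])))   # puts all mass on the two lowest evals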
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for haiku._src.layer_norm.""" import functools import itertools from absl.testing import absltest from absl.testing import parameterized from haiku._src import initializers from haiku._src import layer_norm from haiku._src import test_utils from haiku._src import transform import jax import jax.numpy as jnp import numpy as np def with_param_axis_error(f): @functools.wraps(f) def wrapper(*a, **k): old = layer_norm.ERROR_IF_PARAM_AXIS_NOT_EXPLICIT layer_norm.ERROR_IF_PARAM_AXIS_NOT_EXPLICIT = True try: return f(*a, **k) finally: layer_norm.ERROR_IF_PARAM_AXIS_NOT_EXPLICIT = old return wrapper class LayerNormTest(parameterized.TestCase): @test_utils.transform_and_run def test_connection(self): data = jnp.zeros([2, 3, 4, 5]) normalize = ( lambda a: layer_norm.LayerNorm(a, True, True, param_axis=-1)(data)) normalize(0) normalize(1) normalize(2) normalize(3) normalize(slice(1, None)) normalize(slice(2, None)) normalize(slice(1, -1)) @parameterized.parameters(itertools.product([True, False], repeat=3)) def test_bf16(self, create_scale, create_offset, use_fast_variance): """For all configurations, ensure bf16 outputs from bf16 inputs.""" def f(x): ln = layer_norm.LayerNorm( axis=-1, create_scale=create_scale, create_offset=create_offset, use_fast_variance=use_fast_variance, param_axis=-1) return ln(x) fwd = transform.transform(f) data = jnp.zeros([2, 3, 4, 5], dtype=jnp.bfloat16) params = fwd.init(jax.random.PRNGKey(428), data) bf16_params = jax.tree_map(lambda t: t.astype(jnp.bfloat16), params) self.assertEqual(fwd.apply(bf16_params, None, data).dtype, jnp.bfloat16) @parameterized.parameters(True, False) @test_utils.transform_and_run def test_simple_case(self, use_fast_variance): layer = layer_norm.LayerNorm([1, 2], create_scale=False, create_offset=False, use_fast_variance=use_fast_variance, param_axis=-1) inputs = np.ones([2, 3, 3, 5]) outputs = layer(inputs) for x in np.nditer(outputs): self.assertEqual(x, 0.0) @parameterized.parameters(True, False) @test_utils.transform_and_run def test_simple_case_var(self, use_fast_variance): layer = layer_norm.LayerNorm([1, 2], create_scale=True, create_offset=True, scale_init=initializers.Constant(0.5), offset_init=initializers.Constant(2.0), use_fast_variance=use_fast_variance, param_axis=-1) inputs = np.ones([2, 3, 3, 5]) outputs = layer(inputs) for x in np.nditer(outputs): self.assertEqual(x, 2.0) @test_utils.transform_and_run def test_simple_case_tensor(self): layer = layer_norm.LayerNorm([1, 2], create_scale=False, create_offset=False, param_axis=-1) inputs = np.ones([2, 3, 3, 5]) scale = np.full((5,), 0.5) offset = np.full((5,), 2.0) outputs = layer(inputs, scale, offset) for x in np.nditer(outputs): self.assertEqual(x, 2.0) @parameterized.named_parameters(("String", "foo"), ("ListString", ["foo"])) @test_utils.transform_and_run def test_invalid_axis(self, 
axis): with self.assertRaisesRegex( ValueError, "`axis` should be an int, slice or iterable of ints."): layer_norm.LayerNorm(axis, create_scale=False, create_offset=False) @test_utils.transform_and_run def test_no_scale_and_init_provided(self): with self.assertRaisesRegex( ValueError, "Cannot set `scale_init` if `create_scale=False`."): layer_norm.LayerNorm( 3, create_scale=False, create_offset=True, scale_init=np.ones) @test_utils.transform_and_run def test_no_offset_beta_init_provided(self): with self.assertRaisesRegex( ValueError, "Cannot set `offset_init` if `create_offset=False`."): layer_norm.LayerNorm( 3, create_scale=True, create_offset=False, offset_init=np.zeros) @test_utils.transform_and_run def test_create_scale_and_scale_provided(self): layer = layer_norm.LayerNorm([2], create_scale=True, create_offset=False) with self.assertRaisesRegex( ValueError, "Cannot pass `scale` at call time if `create_scale=True`."): layer(np.ones([2, 3, 4]), scale=np.ones([4])) @test_utils.transform_and_run def test_create_offset_and_offset_provided(self): layer = layer_norm.LayerNorm([2], create_offset=True, create_scale=False) with self.assertRaisesRegex( ValueError, "Cannot pass `offset` at call time if `create_offset=True`."): layer(np.ones([2, 3, 4]), offset=np.ones([4])) @parameterized.parameters(True, False) @test_utils.transform_and_run def test_slice_axis(self, use_fast_variance): slice_layer = layer_norm.LayerNorm( slice(1, -1), create_scale=False, create_offset=False, use_fast_variance=use_fast_variance, param_axis=-1) axis_layer = layer_norm.LayerNorm((1, 2), create_scale=False, create_offset=False, use_fast_variance=use_fast_variance, param_axis=-1) inputs = np.random.uniform(size=[3, 4, 4, 5], low=0, high=10) scale = np.random.normal(size=(5,), loc=1.0) offset = np.random.normal(size=(5,)) slice_outputs = slice_layer(inputs, scale, offset) axis_outputs = axis_layer(inputs, scale, offset) np.testing.assert_array_equal(slice_outputs, axis_outputs) @test_utils.transform_and_run def test_connection_instance_norm(self): layer = layer_norm.InstanceNorm(create_scale=True, create_offset=True) inputs = np.ones([3, 4, 5, 6]) result = layer(inputs) self.assertEqual(result.shape, (3, 4, 5, 6)) @test_utils.transform_and_run def test_param_axis_not_required_for_final_axis(self): ln = layer_norm.LayerNorm(-1, True, True) x = jnp.ones([3, 4, 5, 6]) ln(x) self.assertEqual(ln.params_dict()["layer_norm/scale"].shape, (6,)) self.assertEqual(ln.params_dict()["layer_norm/offset"].shape, (6,)) @test_utils.transform_and_run def test_error_prone_param_axis(self): # NOTE: This test defends current, potentially error prone behaviour # (passing axis!=-1 and not passing param_axis). It will be removed in a # future version of Haiku. 
ln = layer_norm.LayerNorm(1, True, True) x = jnp.ones([3, 4, 5, 6]) ln(x) self.assertEqual(ln.params_dict()["layer_norm/scale"].shape, (6,)) self.assertEqual(ln.params_dict()["layer_norm/offset"].shape, (6,)) @parameterized.parameters(0, 1, 2, ((0, 1),), ((0, 1, 2),), -2, -3, -4, slice(0, 2)) @test_utils.transform_and_run @with_param_axis_error def test_param_axis_required_for_non_final_axis(self, axis): ln = layer_norm.LayerNorm(axis, True, True) x = jnp.ones([3, 4, 5, 6]) with self.assertRaisesRegex(ValueError, "pass.*param_axis.*in the ctor"): ln(x) @parameterized.parameters( (-1, (6,)), (-2, (1, 1, 5, 1)), (-3, (1, 4, 1, 1)), (-4, (3, 1, 1, 1)), (0, (3, 1, 1, 1)), (1, (1, 4, 1, 1)), (2, (1, 1, 5, 1)), (3, (6,)), ) @test_utils.transform_and_run def test_param_axis_sets_param_shape(self, param_axis, param_shape): ln = layer_norm.LayerNorm(-1, True, True, param_axis=param_axis) x = jnp.ones([3, 4, 5, 6]) ln(x) self.assertEqual(ln.params_dict()["layer_norm/scale"].shape, param_shape) self.assertEqual(ln.params_dict()["layer_norm/offset"].shape, param_shape) @parameterized.parameters( ((0, 1, 2), (3, 4, 5, 1)), ((-4, -2, -3), (3, 4, 5, 1)), ((0, 1), (3, 4, 1, 1)), ((0, 3), (3, 1, 1, 6)), ((-4, -1), (3, 1, 1, 6)), ((-1, -4), (3, 1, 1, 6)), ) @test_utils.transform_and_run def test_multiple_param_axis(self, param_axis, param_shape): ln = layer_norm.LayerNorm(-1, True, True, param_axis=param_axis) x = jnp.ones([3, 4, 5, 6]) ln(x) self.assertEqual(ln.params_dict()["layer_norm/scale"].shape, param_shape) self.assertEqual(ln.params_dict()["layer_norm/offset"].shape, param_shape) if __name__ == "__main__": absltest.main()
deepmind/dm-haiku
haiku/_src/layer_norm_test.py
Python
apache-2.0
9,340
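The tests above go through hk.transform; outside the test harness the module under test is used the same way. A minimal usage sketch with the standard Haiku API:

import haiku as hk
import jax
import jax.numpy as jnp

def f(x):
    # Normalize over the final axis, learning a per-feature scale and offset.
    return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)

fwd = hk.transform(f)
x = jnp.ones([2, 3, 4])
params = fwd.init(jax.random.PRNGKey(0), x)
y = fwd.apply(params, None, x)   # no rng needed; the module is deterministic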
from sympy.core import Tuple, Basic, Add from sympy.strategies import typed, canon, debug, do_one, unpack from sympy.functions import transpose from sympy.utilities import sift from sympy.matrices.expressions.matexpr import MatrixExpr, ZeroMatrix, Identity from sympy.matrices.expressions.matmul import MatMul from sympy.matrices.expressions.matadd import MatAdd from sympy.matrices.expressions.matpow import MatPow from sympy.matrices.expressions.transpose import Transpose from sympy.matrices.expressions.trace import Trace from sympy.matrices.expressions.slice import MatrixSlice from sympy.matrices.expressions.inverse import Inverse from sympy.matrices import Matrix, eye class BlockMatrix(MatrixExpr): """A BlockMatrix is a Matrix composed of other smaller, submatrices The submatrices are stored in a SymPy Matrix object but accessed as part of a Matrix Expression >>> from sympy import (MatrixSymbol, BlockMatrix, symbols, ... Identity, ZeroMatrix, block_collapse) >>> n,m,l = symbols('n m l') >>> X = MatrixSymbol('X', n, n) >>> Y = MatrixSymbol('Y', m ,m) >>> Z = MatrixSymbol('Z', n, m) >>> B = BlockMatrix([[X, Z], [ZeroMatrix(m,n), Y]]) >>> print B [X, Z] [0, Y] >>> C = BlockMatrix([[Identity(n), Z]]) >>> print C [I, Z] >>> print block_collapse(C*B) [X, Z + Z*Y] """ def __new__(cls, *args): from sympy.matrices.immutable import ImmutableMatrix mat = ImmutableMatrix(*args) obj = Basic.__new__(cls, mat) return obj @property def shape(self): numrows = numcols = 0 M = self.blocks for i in range(M.shape[0]): numrows += M[i, 0].shape[0] for i in range(M.shape[1]): numcols += M[0, i].shape[1] return (numrows, numcols) @property def blockshape(self): return self.blocks.shape @property def blocks(self): return self.args[0] @property def rowblocksizes(self): return [self.blocks[i, 0].rows for i in range(self.blockshape[0])] @property def colblocksizes(self): return [self.blocks[0, i].cols for i in range(self.blockshape[1])] def structurally_equal(self, other): return (isinstance(other, BlockMatrix) and self.shape == other.shape and self.blockshape == other.blockshape and self.rowblocksizes == other.rowblocksizes and self.colblocksizes == other.colblocksizes) def _blockmul(self, other): if (isinstance(other, BlockMatrix) and self.colblocksizes == other.rowblocksizes): return BlockMatrix(self.blocks*other.blocks) return self * other def _blockadd(self, other): if (isinstance(other, BlockMatrix) and self.structurally_equal(other)): return BlockMatrix(self.blocks + other.blocks) return self + other def _eval_transpose(self): # Flip all the individual matrices matrices = [transpose(matrix) for matrix in self.blocks] # Make a copy M = Matrix(self.blockshape[0], self.blockshape[1], matrices) # Transpose the block structure M = M.transpose() return BlockMatrix(M) def _eval_trace(self): if self.rowblocksizes == self.colblocksizes: return Add(*[Trace(self.blocks[i, i]) for i in range(self.blockshape[0])]) raise NotImplementedError( "Can't perform trace of irregular blockshape") def transpose(self): """Return transpose of matrix. 
Examples ======== >>> from sympy import MatrixSymbol, BlockMatrix, ZeroMatrix >>> from sympy.abc import l, m, n >>> X = MatrixSymbol('X', n, n) >>> Y = MatrixSymbol('Y', m ,m) >>> Z = MatrixSymbol('Z', n, m) >>> B = BlockMatrix([[X, Z], [ZeroMatrix(m,n), Y]]) >>> B.transpose() [X', 0] [Z', Y'] >>> _.transpose() [X, Z] [0, Y] """ return self._eval_transpose() def _eval_inverse(self, expand=False): # Inverse of one by one block matrix is easy if self.blockshape == (1, 1): mat = Matrix(1, 1, (self.blocks[0].inverse(),)) return BlockMatrix(mat) # Inverse of a two by two block matrix is known elif expand and self.blockshape == (2, 2): # Cite: The Matrix Cookbook Section 9.1.3 A11, A12, A21, A22 = (self.blocks[0, 0], self.blocks[0, 1], self.blocks[1, 0], self.blocks[1, 1]) C1 = A11 - A12*A22.I*A21 C2 = A22 - A21*A11.I*A12 mat = Matrix([[C1.I, (-A11).I*A12*C2.I], [-C2.I*A21*A11.I, C2.I]]) return BlockMatrix(mat) else: return Inverse(self) def inverse(self, expand=False): """Return inverse of matrix. Examples ======== >>> from sympy import MatrixSymbol, BlockMatrix, ZeroMatrix >>> from sympy.abc import l, m, n >>> X = MatrixSymbol('X', n, n) >>> BlockMatrix([[X]]).inverse() [X^-1] >>> Y = MatrixSymbol('Y', m ,m) >>> Z = MatrixSymbol('Z', n, m) >>> B = BlockMatrix([[X, Z], [ZeroMatrix(m,n), Y]]) >>> B [X, Z] [0, Y] >>> B.inverse(expand=True) [X^-1, (-1)*X^-1*Z*Y^-1] [ 0, Y^-1] """ return self._eval_inverse(expand) def _entry(self, i, j): # Find row entry for row_block, numrows in enumerate(self.rowblocksizes): if i < numrows: break else: i -= numrows for col_block, numcols in enumerate(self.colblocksizes): if j < numcols: break else: j -= numcols return self.blocks[row_block, col_block][i, j] @property def is_Identity(self): if self.blockshape[0] != self.blockshape[1]: return False for i in range(self.blockshape[0]): for j in range(self.blockshape[1]): if i==j and not self.blocks[i, j].is_Identity: return False if i!=j and not self.blocks[i, j].is_ZeroMatrix: return False return True @property def is_structurally_symmetric(self): return self.rowblocksizes == self.colblocksizes def equals(self, other): if self == other: return True if (isinstance(other, BlockMatrix) and self.blocks == other.blocks): return True return super(BlockMatrix, self).equals(other) class BlockDiagMatrix(BlockMatrix): """ A BlockDiagMatrix is a BlockMatrix with matrices only along the diagonal >>> from sympy import MatrixSymbol, BlockDiagMatrix, symbols, Identity >>> n,m,l = symbols('n m l') >>> X = MatrixSymbol('X', n, n) >>> Y = MatrixSymbol('Y', m ,m) >>> BlockDiagMatrix(X, Y) [X, 0] [0, Y] """ def __new__(cls, *mats): return Basic.__new__(BlockDiagMatrix, *mats) @property def diag(self): return self.args @property def blocks(self): from sympy.matrices.immutable import ImmutableMatrix mats = self.args data = [[mats[i] if i == j else ZeroMatrix(mats[i].rows, mats[j].cols) for j in range(len(mats))] for i in range(len(mats))] return ImmutableMatrix(data) @property def shape(self): return (sum(block.rows for block in self.args), sum(block.cols for block in self.args)) @property def blockshape(self): n = len(self.args) return (n, n) @property def rowblocksizes(self): return [block.rows for block in self.args] @property def colblocksizes(self): return [block.cols for block in self.args] def _eval_inverse(self, expand='ignored'): return BlockDiagMatrix(*[mat.inverse() for mat in self.args]) def _blockmul(self, other): if (isinstance(other, BlockDiagMatrix) and self.colblocksizes == other.rowblocksizes): return 
BlockDiagMatrix(*[a*b for a, b in zip(self.args, other.args)]) else: return BlockMatrix._blockmul(self, other) def _blockadd(self, other): if (isinstance(other, BlockDiagMatrix) and self.blockshape == other.blockshape and self.rowblocksizes == other.rowblocksizes and self.colblocksizes == other.colblocksizes): return BlockDiagMatrix(*[a + b for a, b in zip(self.args, other.args)]) else: return BlockMatrix._blockadd(self, other) def block_collapse(expr): """Evaluates a block matrix expression >>> from sympy import MatrixSymbol, BlockMatrix, symbols, \ Identity, Matrix, ZeroMatrix, block_collapse >>> n,m,l = symbols('n m l') >>> X = MatrixSymbol('X', n, n) >>> Y = MatrixSymbol('Y', m ,m) >>> Z = MatrixSymbol('Z', n, m) >>> B = BlockMatrix([[X, Z], [ZeroMatrix(m, n), Y]]) >>> print B [X, Z] [0, Y] >>> C = BlockMatrix([[Identity(n), Z]]) >>> print C [I, Z] >>> print block_collapse(C*B) [X, Z + Z*Y] """ rule = canon(typed({MatAdd: do_one(bc_matadd, bc_block_plus_ident), MatMul: do_one(bc_matmul, bc_dist), BlockMatrix: bc_unpack})) result = rule(expr) try: return result.doit() except AttributeError: return result def bc_unpack(expr): if expr.blockshape == (1, 1): return expr.blocks[0, 0] return expr def bc_matadd(expr): args = sift(expr.args, lambda M: isinstance(M, BlockMatrix)) blocks = args[True] if not blocks: return expr nonblocks = args[False] block = blocks[0] for b in blocks[1:]: block = block._blockadd(b) if nonblocks: return MatAdd(*nonblocks) + block else: return block def bc_block_plus_ident(expr): idents = [arg for arg in expr.args if arg.is_Identity] if not idents: return expr blocks = [arg for arg in expr.args if isinstance(arg, BlockMatrix)] if (blocks and all(b.structurally_equal(blocks[0]) for b in blocks) and blocks[0].is_structurally_symmetric): block_id = BlockDiagMatrix(*[Identity(k) for k in blocks[0].rowblocksizes]) return MatAdd(block_id * len(idents), *blocks).doit() return expr def bc_dist(expr): """ Turn a*[X, Y] into [a*X, a*Y] """ factor, mat = expr.as_coeff_mmul() if factor != 1 and isinstance(unpack(mat), BlockMatrix): B = unpack(mat).blocks return BlockMatrix([[factor * B[i, j] for j in range(B.cols)] for i in range(B.rows)]) return expr def bc_matmul(expr): factor, matrices = expr.as_coeff_matrices() i = 0 while (i+1 < len(matrices)): A, B = matrices[i:i+2] if isinstance(A, BlockMatrix) and isinstance(B, BlockMatrix): matrices[i] = A._blockmul(B) matrices.pop(i+1) else: i+=1 return MatMul(factor, *matrices).doit() def bounds(sizes): """ Convert sequence of numbers into pairs of low-high pairs >>> from sympy.matrices.expressions.blockmatrix import bounds >>> bounds((1, 10, 50)) [(0, 1), (1, 11), (11, 61)] """ low = 0 rv = [] for size in sizes: rv.append((low, low + size)) low += size return rv def blockcut(expr, rowsizes, colsizes): """ Cut a matrix expression into Blocks >>> from sympy import ImmutableMatrix, blockcut >>> M = ImmutableMatrix(4, 4, range(16)) >>> B = blockcut(M, (1, 3), (1, 3)) >>> type(B).__name__ 'BlockMatrix' >>> ImmutableMatrix(B.blocks[0, 1]) [1, 2, 3] """ rowbounds = bounds(rowsizes) colbounds = bounds(colsizes) return BlockMatrix([[MatrixSlice(expr, rowbound, colbound) for colbound in colbounds] for rowbound in rowbounds])
amitjamadagni/sympy
sympy/matrices/expressions/blockmatrix.py
Python
bsd-3-clause
12,332
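Beyond the docstring examples, block_collapse() also preserves block-diagonal structure through multiplication (via BlockDiagMatrix._blockmul above). A short sketch in the same Python 2 style as the docstrings:

from sympy import MatrixSymbol, BlockDiagMatrix, block_collapse, symbols
n, m = symbols('n m')
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', m, m)
D = BlockDiagMatrix(A, B)
print block_collapse(D*D)   # expected: the block diagonal of A*A and B*B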
from flask import current_app, jsonify, request from notifications_python_client.errors import HTTPError from app import status_api_client, version from app.status import status @status.route('/_status', methods=['GET']) def show_status(): if request.args.get('elb', None) or request.args.get('simple', None): return jsonify(status="ok"), 200 else: try: api_status = status_api_client.get_status() except HTTPError as e: current_app.logger.exception("API failed to respond") return jsonify(status="error", message=str(e.message)), 500 return jsonify( status="ok", api=api_status, git_commit=version.__git_commit__, build_time=version.__time__), 200
alphagov/notifications-admin
app/status/views/healthcheck.py
Python
mit
776
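The elb/simple query parameters short-circuit the call to the status API. A hedged sketch of exercising that path with Flask's test client; the app object name here is illustrative, not taken from this file:

# Hypothetical test: ?simple=1 answers {"status": "ok"} without touching the API.
client = flask_app.test_client()      # flask_app: however the admin app is created
response = client.get('/_status?simple=1')
assert response.status_code == 200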
# Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. import os import re # noqa import sys import ldap import djcelery from datetime import timedelta from kombu import Queue, Exchange from kombu.common import Broadcast # global settings from django.conf import global_settings # ugettext lazy from django.utils.translation import ugettext_lazy as _ # Update this module's local settings from the global settings module. this_module = sys.modules[__name__] for setting in dir(global_settings): if setting == setting.upper(): setattr(this_module, setting, getattr(global_settings, setting)) # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(__file__)) def is_testing(argv=None): import sys '''Return True if running django or py.test unit tests.''' argv = sys.argv if argv is None else argv if len(argv) >= 1 and ('py.test' in argv[0] or 'py/test.py' in argv[0]): return True elif len(argv) >= 2 and argv[1] == 'test': return True return False def IS_TESTING(argv=None): return is_testing(argv) DEBUG = True TEMPLATE_DEBUG = DEBUG SQL_DEBUG = DEBUG ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'), 'ATOMIC_REQUESTS': True, 'TEST': { # Test database cannot be :memory: for celery/inventory tests. 'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3'), }, } } # Internationalization # https://docs.djangoproject.com/en/dev/topics/i18n/ # # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = None # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True USE_TZ = True STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'ui', 'static'), os.path.join(BASE_DIR, 'static'), ) # Absolute filesystem path to the directory where static file are collected via # the collectstatic command. STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static') # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/dev/howto/static-files/ STATIC_URL = '/static/' # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/" MEDIA_ROOT = os.path.join(BASE_DIR, 'public', 'media') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash if there is a path component (optional in other cases). # Examples: "http://media.lawrence.com", "http://example.com/media/" MEDIA_URL = '/media/' # Absolute filesystem path to the directory to host projects (with playbooks). # This directory should not be web-accessible. PROJECTS_ROOT = os.path.join(BASE_DIR, 'projects') # Absolute filesystem path to the directory for job status stdout (default for # development and tests, default for production defined in production.py). 
This # directory should not be web-accessible JOBOUTPUT_ROOT = os.path.join(BASE_DIR, 'job_output') # Absolute filesystem path to the directory to store logs LOG_ROOT = os.path.join(BASE_DIR) # The heartbeat file for the tower scheduler SCHEDULE_METADATA_LOCATION = os.path.join(BASE_DIR, '.tower_cycle') # Django gettext files path: locale/<lang-code>/LC_MESSAGES/django.po, django.mo LOCALE_PATHS = ( os.path.join(BASE_DIR, 'locale'), ) # Graph of resources that can have named-url NAMED_URL_GRAPH = {} # Maximum number of the same job that can be waiting to run when launching from scheduler # Note: This setting may be overridden by database settings. SCHEDULE_MAX_JOBS = 10 SITE_ID = 1 # Make this unique, and don't share it with anybody. SECRET_KEY = 'p7z7g1ql4%6+(6nlebb6hdk7sd^&fnjpal308%n%+p^_e6vo1y' # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # HTTP headers and meta keys to search to determine remote host name or IP. Add # additional items to this list, such as "HTTP_X_FORWARDED_FOR", if behind a # reverse proxy. REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST'] # If Tower is behind a reverse proxy/load balancer, use this setting to # whitelist the proxy IP addresses from which Tower should trust custom # REMOTE_HOST_HEADERS header values # REMOTE_HOST_HEADERS = ['HTTP_X_FORWARDED_FOR', ''REMOTE_ADDR', 'REMOTE_HOST'] # PROXY_IP_WHITELIST = ['10.0.1.100', '10.0.1.101'] # If this setting is an empty list (the default), the headers specified by # REMOTE_HOST_HEADERS will be trusted unconditionally') PROXY_IP_WHITELIST = [] # Note: This setting may be overridden by database settings. STDOUT_MAX_BYTES_DISPLAY = 1048576 # Returned in the header on event api lists as a recommendation to the UI # on how many events to display before truncating/hiding RECOMMENDED_MAX_EVENTS_DISPLAY_HEADER = 4000 # The maximum size of the ansible callback event's res data structure # beyond this limit and the value will be removed MAX_EVENT_RES_DATA = 700000 # Note: This setting may be overridden by database settings. 
EVENT_STDOUT_MAX_BYTES_DISPLAY = 1024 # The amount of time before a stdout file is expired and removed locally # Note that this can be recreated if the stdout is downloaded LOCAL_STDOUT_EXPIRE_TIME = 2592000 # The number of processes spawned by the callback receiver to process job # events into the database JOB_EVENT_WORKERS = 4 # The maximum size of the job event worker queue before requests are blocked JOB_EVENT_MAX_QUEUE_SIZE = 10000 # Disallow sending session cookies over insecure connections SESSION_COOKIE_SECURE = True # Disallow sending csrf cookies over insecure connections CSRF_COOKIE_SECURE = True # Limit CSRF cookies to browser sessions CSRF_COOKIE_AGE = None TEMPLATE_CONTEXT_PROCESSORS = ( # NOQA 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.request', 'awx.ui.context_processors.settings', 'awx.ui.context_processors.version', 'social.apps.django_app.context_processors.backends', 'social.apps.django_app.context_processors.login_redirect', ) MIDDLEWARE_CLASSES = ( # NOQA 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'awx.main.middleware.ActivityStreamMiddleware', 'awx.sso.middleware.SocialAuthMiddleware', 'crum.CurrentRequestUserMiddleware', 'awx.main.middleware.AuthTokenTimeoutMiddleware', 'awx.main.middleware.URLModificationMiddleware', ) TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'templates'), ) TEMPLATE_LOADERS = ( ('django.template.loaders.cached.Loader', ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', )), ) ROOT_URLCONF = 'awx.urls' WSGI_APPLICATION = 'awx.wsgi.application' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.staticfiles', 'rest_framework', 'django_extensions', 'djcelery', 'kombu.transport.django', 'channels', 'polymorphic', 'taggit', 'social.apps.django_app.default', 'awx.conf', 'awx.main', 'awx.api', 'awx.ui', 'awx.sso', 'solo', ) INTERNAL_IPS = ('127.0.0.1',) MAX_PAGE_SIZE = 200 REST_FRAMEWORK = { 'DEFAULT_PAGINATION_CLASS': 'awx.api.pagination.Pagination', 'PAGE_SIZE': 25, 'DEFAULT_AUTHENTICATION_CLASSES': ( 'awx.api.authentication.TokenAuthentication', 'awx.api.authentication.LoggedBasicAuthentication', #'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'awx.api.permissions.ModelAccessPermission', ), 'DEFAULT_FILTER_BACKENDS': ( 'awx.api.filters.TypeFilterBackend', 'awx.api.filters.FieldLookupBackend', 'rest_framework.filters.SearchFilter', 'awx.api.filters.OrderByBackend', ), 'DEFAULT_PARSER_CLASSES': ( 'awx.api.parsers.JSONParser', ), 'DEFAULT_RENDERER_CLASSES': ( 'rest_framework.renderers.JSONRenderer', 'awx.api.renderers.BrowsableAPIRenderer', ), 'DEFAULT_METADATA_CLASS': 'awx.api.metadata.Metadata', 'EXCEPTION_HANDLER': 'awx.api.views.api_exception_handler', 'VIEW_NAME_FUNCTION': 'awx.api.generics.get_view_name', 'VIEW_DESCRIPTION_FUNCTION': 'awx.api.generics.get_view_description', 'NON_FIELD_ERRORS_KEY': '__all__', 
'DEFAULT_VERSION': 'v2', #'URL_FORMAT_OVERRIDE': None, } AUTHENTICATION_BACKENDS = ( 'awx.sso.backends.LDAPBackend', 'awx.sso.backends.RADIUSBackend', 'awx.sso.backends.TACACSPlusBackend', 'social.backends.google.GoogleOAuth2', 'social.backends.github.GithubOAuth2', 'social.backends.github.GithubOrganizationOAuth2', 'social.backends.github.GithubTeamOAuth2', 'social.backends.azuread.AzureADOAuth2', 'awx.sso.backends.SAMLAuth', 'django.contrib.auth.backends.ModelBackend', ) # LDAP server (default to None to skip using LDAP authentication). # Note: This setting may be overridden by database settings. AUTH_LDAP_SERVER_URI = None # Disable LDAP referrals by default (to prevent certain LDAP queries from # hanging with AD). # Note: This setting may be overridden by database settings. AUTH_LDAP_CONNECTION_OPTIONS = { ldap.OPT_REFERRALS: 0, ldap.OPT_NETWORK_TIMEOUT: 30 } # Radius server settings (default to empty string to skip using Radius auth). # Note: These settings may be overridden by database settings. RADIUS_SERVER = '' RADIUS_PORT = 1812 RADIUS_SECRET = '' # TACACS+ settings (default host to empty string to skip using TACACS+ auth). # Note: These settings may be overridden by database settings. TACACSPLUS_HOST = '' TACACSPLUS_PORT = 49 TACACSPLUS_SECRET = '' TACACSPLUS_SESSION_TIMEOUT = 5 TACACSPLUS_AUTH_PROTOCOL = 'ascii' # Seconds before auth tokens expire. # Note: This setting may be overridden by database settings. AUTH_TOKEN_EXPIRATION = 1800 # Maximum number of per-user valid, concurrent tokens. # -1 is unlimited # Note: This setting may be overridden by database settings. AUTH_TOKEN_PER_USER = -1 # Enable / Disable HTTP Basic Authentication used in the API browser # Note: Session limits are not enforced when using HTTP Basic Authentication. # Note: This setting may be overridden by database settings. AUTH_BASIC_ENABLED = True # If set, serve only minified JS for UI. USE_MINIFIED_JS = False # Email address that error messages come from. SERVER_EMAIL = 'root@localhost' # Default email address to use for various automated correspondence from # the site managers. DEFAULT_FROM_EMAIL = 'tower@localhost' # Subject-line prefix for email messages send with django.core.mail.mail_admins # or ...mail_managers. Make sure to include the trailing space. EMAIL_SUBJECT_PREFIX = '[Tower] ' # The email backend to use. For possible shortcuts see django.core.mail. # The default is to use the SMTP backend. # Third-party backends can be specified by providing a Python path # to a module that defines an EmailBackend class. EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # Host for sending email. EMAIL_HOST = 'localhost' # Port for sending email. EMAIL_PORT = 25 # Optional SMTP authentication information for EMAIL_HOST. EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' EMAIL_USE_TLS = False # Memcached django cache configuration # CACHES = { # 'default': { # 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', # 'LOCATION': '127.0.0.1:11211', # 'TIMEOUT': 864000, # 'KEY_PREFIX': 'tower_dev', # } # } # Use Django-Debug-Toolbar if installed. try: import debug_toolbar INSTALLED_APPS += (debug_toolbar.__name__,) except ImportError: pass DEBUG_TOOLBAR_CONFIG = { 'INTERCEPT_REDIRECTS': False, 'ENABLE_STACKTRACES' : True, } DEVSERVER_DEFAULT_ADDR = '0.0.0.0' DEVSERVER_DEFAULT_PORT = '8013' # Set default ports for live server tests. os.environ.setdefault('DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:9013-9199') # Initialize Django-Celery. 
djcelery.setup_loader() BROKER_URL = 'amqp://guest:guest@localhost:5672//' CELERY_EVENT_QUEUE_TTL = 5 CELERY_DEFAULT_QUEUE = 'tower' CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_ACCEPT_CONTENT = ['json'] CELERY_TRACK_STARTED = True CELERYD_TASK_TIME_LIMIT = None CELERYD_TASK_SOFT_TIME_LIMIT = None CELERYD_POOL_RESTARTS = True CELERYBEAT_SCHEDULER = 'celery.beat.PersistentScheduler' CELERYBEAT_MAX_LOOP_INTERVAL = 60 CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' CELERY_IMPORTS = ('awx.main.scheduler.tasks',) CELERY_QUEUES = ( Queue('default', Exchange('default'), routing_key='default'), Queue('tower', Exchange('tower'), routing_key='tower'), Queue('tower_scheduler', Exchange('scheduler', type='topic'), routing_key='tower_scheduler.job.#', durable=False), Broadcast('tower_broadcast_all') ) CELERY_ROUTES = {'awx.main.scheduler.tasks.run_task_manager': {'queue': 'tower', 'routing_key': 'tower'}, 'awx.main.scheduler.tasks.run_job_launch': {'queue': 'tower_scheduler', 'routing_key': 'tower_scheduler.job.launch'}, 'awx.main.scheduler.tasks.run_job_complete': {'queue': 'tower_scheduler', 'routing_key': 'tower_scheduler.job.complete'}, 'awx.main.tasks.cluster_node_heartbeat': {'queue': 'default', 'routing_key': 'cluster.heartbeat'}, 'awx.main.tasks.purge_old_stdout_files': {'queue': 'default', 'routing_key': 'cluster.heartbeat'}} CELERYBEAT_SCHEDULE = { 'tower_scheduler': { 'task': 'awx.main.tasks.awx_periodic_scheduler', 'schedule': timedelta(seconds=30), 'options': {'expires': 20,} }, 'admin_checks': { 'task': 'awx.main.tasks.run_administrative_checks', 'schedule': timedelta(days=30) }, 'authtoken_cleanup': { 'task': 'awx.main.tasks.cleanup_authtokens', 'schedule': timedelta(days=30) }, 'cluster_heartbeat': { 'task': 'awx.main.tasks.cluster_node_heartbeat', 'schedule': timedelta(seconds=60), 'options': {'expires': 50,} }, 'purge_stdout_files': { 'task': 'awx.main.tasks.purge_old_stdout_files', 'schedule': timedelta(days=7) }, 'task_manager': { 'task': 'awx.main.scheduler.tasks.run_task_manager', 'schedule': timedelta(seconds=20), 'options': {'expires': 20,} }, } AWX_INCONSISTENT_TASK_INTERVAL = 60 * 3 # Django Caching Configuration if is_testing(): CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, } else: CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': 'memcached:11211', }, } # Social Auth configuration. 
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy' SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage' SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL # noqa SOCIAL_AUTH_PIPELINE = ( 'social.pipeline.social_auth.social_details', 'social.pipeline.social_auth.social_uid', 'social.pipeline.social_auth.auth_allowed', 'social.pipeline.social_auth.social_user', 'social.pipeline.user.get_username', 'social.pipeline.social_auth.associate_by_email', 'social.pipeline.user.create_user', 'awx.sso.pipeline.check_user_found_or_created', 'social.pipeline.social_auth.associate_user', 'social.pipeline.social_auth.load_extra_data', 'awx.sso.pipeline.set_is_active_for_new_user', 'social.pipeline.user.user_details', 'awx.sso.pipeline.prevent_inactive_login', 'awx.sso.pipeline.update_user_orgs', 'awx.sso.pipeline.update_user_teams', ) SOCIAL_AUTH_LOGIN_URL = '/' SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/' SOCIAL_AUTH_LOGIN_ERROR_URL = '/sso/error/' SOCIAL_AUTH_INACTIVE_USER_URL = '/sso/inactive/' SOCIAL_AUTH_RAISE_EXCEPTIONS = False SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False SOCIAL_AUTH_SLUGIFY_USERNAMES = True SOCIAL_AUTH_CLEAN_USERNAMES = True SOCIAL_AUTH_SANITIZE_REDIRECTS = True SOCIAL_AUTH_REDIRECT_IS_HTTPS = False # Note: These settings may be overridden by database settings. SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile'] SOCIAL_AUTH_GITHUB_KEY = '' SOCIAL_AUTH_GITHUB_SECRET = '' SOCIAL_AUTH_GITHUB_SCOPE = ['user:email', 'read:org'] SOCIAL_AUTH_GITHUB_ORG_KEY = '' SOCIAL_AUTH_GITHUB_ORG_SECRET = '' SOCIAL_AUTH_GITHUB_ORG_NAME = '' SOCIAL_AUTH_GITHUB_ORG_SCOPE = ['user:email', 'read:org'] SOCIAL_AUTH_GITHUB_TEAM_KEY = '' SOCIAL_AUTH_GITHUB_TEAM_SECRET = '' SOCIAL_AUTH_GITHUB_TEAM_ID = '' SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['user:email', 'read:org'] SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = '' SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET = '' SOCIAL_AUTH_SAML_SP_ENTITY_ID = '' SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = '' SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = '' SOCIAL_AUTH_SAML_ORG_INFO = {} SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {} SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {} SOCIAL_AUTH_SAML_ENABLED_IDPS = {} # Any ANSIBLE_* settings will be passed to the subprocess environment by the # celery task. # Do not want AWX to ask interactive questions and want it to be friendly with # reprovisioning ANSIBLE_HOST_KEY_CHECKING = False # RHEL has too old of an SSH so ansible will select paramiko and this is VERY # slow. ANSIBLE_PARAMIKO_RECORD_HOST_KEYS = False # Force ansible in color even if we don't have a TTY so we can properly colorize # output ANSIBLE_FORCE_COLOR = True # Additional environment variables to be passed to the subprocess started by # the celery task. AWX_TASK_ENV = {} # Flag to enable/disable updating hosts M2M when saving job events. CAPTURE_JOB_EVENT_HOSTS = False # Rebuild Host Smart Inventory memberships. AWX_REBUILD_SMART_MEMBERSHIP = False # Enable bubblewrap support for running jobs (playbook runs only). # Note: This setting may be overridden by database settings. AWX_PROOT_ENABLED = True # Command/path to bubblewrap. AWX_PROOT_CMD = 'bwrap' # Additional paths to hide from jobs using bubblewrap. # Note: This setting may be overridden by database settings. AWX_PROOT_HIDE_PATHS = [] # Additional paths to show for jobs using bubbelwrap. # Note: This setting may be overridden by database settings. 
AWX_PROOT_SHOW_PATHS = [] # Number of jobs to show as part of the job template history AWX_JOB_TEMPLATE_HISTORY = 10 # The directory in which Tower will create new temporary directories for job # execution and isolation (such as credential files and custom # inventory scripts). # Note: This setting may be overridden by database settings. AWX_PROOT_BASE_PATH = "/tmp" # User definable ansible callback plugins # Note: This setting may be overridden by database settings. AWX_ANSIBLE_CALLBACK_PLUGINS = "" # Time at which an HA node is considered active AWX_ACTIVE_NODE_TIME = 7200 # The number of seconds to sleep between status checks for jobs running on isolated nodes AWX_ISOLATED_CHECK_INTERVAL = 30 # The timeout (in seconds) for launching jobs on isolated nodes AWX_ISOLATED_LAUNCH_TIMEOUT = 600 # Ansible connection timeout (in seconds) for communicating with isolated instances AWX_ISOLATED_CONNECTION_TIMEOUT = 10 # The time (in seconds) between the periodic isolated heartbeat status check AWX_ISOLATED_PERIODIC_CHECK = 600 # Enable Pendo on the UI, possible values are 'off', 'anonymous', and 'detailed' # Note: This setting may be overridden by database settings. PENDO_TRACKING_STATE = "off" # Default list of modules allowed for ad hoc commands. # Note: This setting may be overridden by database settings. AD_HOC_COMMANDS = [ 'command', 'shell', 'yum', 'apt', 'apt_key', 'apt_repository', 'apt_rpm', 'service', 'group', 'user', 'mount', 'ping', 'selinux', 'setup', 'win_ping', 'win_service', 'win_updates', 'win_group', 'win_user', ] INV_ENV_VARIABLE_BLACKLIST = ("HOME", "USER", "_", "TERM") # ---------------- # -- Amazon EC2 -- # ---------------- # AWS does not appear to provide pretty region names via any API, so store the # list of names here. The available region IDs will be pulled from boto. # http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region EC2_REGION_NAMES = { 'us-east-1': _('US East (Northern Virginia)'), 'us-east-2': _('US East (Ohio)'), 'us-west-2': _('US West (Oregon)'), 'us-west-1': _('US West (Northern California)'), 'ca-central-1': _('Canada (Central)'), 'eu-central-1': _('EU (Frankfurt)'), 'eu-west-1': _('EU (Ireland)'), 'eu-west-2': _('EU (London)'), 'ap-southeast-1': _('Asia Pacific (Singapore)'), 'ap-southeast-2': _('Asia Pacific (Sydney)'), 'ap-northeast-1': _('Asia Pacific (Tokyo)'), 'ap-northeast-2': _('Asia Pacific (Seoul)'), 'ap-south-1': _('Asia Pacific (Mumbai)'), 'sa-east-1': _('South America (Sao Paulo)'), 'us-gov-west-1': _('US West (GovCloud)'), 'cn-north-1': _('China (Beijing)'), } EC2_REGIONS_BLACKLIST = [ 'us-gov-west-1', 'cn-north-1', ] # Inventory variable name/values for determining if host is active/enabled. EC2_ENABLED_VAR = 'ec2_state' EC2_ENABLED_VALUE = 'running' # Inventory variable name containing unique instance ID. EC2_INSTANCE_ID_VAR = 'ec2_id' # Filter for allowed group/host names when importing inventory from EC2. EC2_GROUP_FILTER = r'^.+$' EC2_HOST_FILTER = r'^.+$' EC2_EXCLUDE_EMPTY_GROUPS = True # ------------ # -- VMware -- # ------------ VMWARE_REGIONS_BLACKLIST = [] # Inventory variable name/values for determining whether a host is # active in vSphere. VMWARE_ENABLED_VAR = 'guest.gueststate' VMWARE_ENABLED_VALUE = 'running' # Inventory variable name containing the unique instance ID. VMWARE_INSTANCE_ID_VAR = 'config.instanceuuid' # Filter for allowed group and host names when importing inventory # from VMware. 
VMWARE_GROUP_FILTER = r'^.+$' VMWARE_HOST_FILTER = r'^.+$' VMWARE_EXCLUDE_EMPTY_GROUPS = True # --------------------------- # -- Google Compute Engine -- # --------------------------- # It's not possible to get zones in GCE without authenticating, so we # provide a list here. # Source: https://developers.google.com/compute/docs/zones GCE_REGION_CHOICES = [ ('us-east1-b', _('US East 1 (B)')), ('us-east1-c', _('US East 1 (C)')), ('us-east1-d', _('US East 1 (D)')), ('us-east4-a', _('US East 4 (A)')), ('us-east4-b', _('US East 4 (B)')), ('us-east4-c', _('US East 4 (C)')), ('us-central1-a', _('US Central (A)')), ('us-central1-b', _('US Central (B)')), ('us-central1-c', _('US Central (C)')), ('us-central1-f', _('US Central (F)')), ('us-west1-a', _('US West (A)')), ('us-west1-b', _('US West (B)')), ('us-west1-c', _('US West (C)')), ('europe-west1-b', _('Europe West 1 (B)')), ('europe-west1-c', _('Europe West 1 (C)')), ('europe-west1-d', _('Europe West 1 (D)')), ('europe-west2-a', _('Europe West 2 (A)')), ('europe-west2-b', _('Europe West 2 (B)')), ('europe-west2-c', _('Europe West 2 (C)')), ('asia-east1-a', _('Asia East (A)')), ('asia-east1-b', _('Asia East (B)')), ('asia-east1-c', _('Asia East (C)')), ('asia-southeast1-a', _('Asia Southeast (A)')), ('asia-southeast1-b', _('Asia Southeast (B)')), ('asia-northeast1-a', _('Asia Northeast (A)')), ('asia-northeast1-b', _('Asia Northeast (B)')), ('asia-northeast1-c', _('Asia Northeast (C)')), ('australia-southeast1-a', _('Australia Southeast (A)')), ('australia-southeast1-b', _('Australia Southeast (B)')), ('australia-southeast1-c', _('Australia Southeast (C)')), ] GCE_REGIONS_BLACKLIST = [] # Inventory variable name/value for determining whether a host is active # in Google Compute Engine. GCE_ENABLED_VAR = 'status' GCE_ENABLED_VALUE = 'running' # Filter for allowed group and host names when importing inventory from # Google Compute Engine. GCE_GROUP_FILTER = r'^.+$' GCE_HOST_FILTER = r'^.+$' GCE_EXCLUDE_EMPTY_GROUPS = True GCE_INSTANCE_ID_VAR = None # ------------------- # -- Microsoft Azure -- # ------------------- # It's not possible to get zones in Azure without authenticating, so we # provide a list here. AZURE_REGION_CHOICES = [ ('eastus', _('US East')), ('eastus2', _('US East 2')), ('centralus', _('US Central')), ('northcentralus', _('US North Central')), ('southcentralus', _('US South Central')), ('westcentralus', _('US West Central')), ('westus', _('US West')), ('westus2', _('US West 2')), ('canadaeast', _('Canada East')), ('canadacentral', _('Canada Central')), ('brazilsouth', _('Brazil South')), ('northeurope', _('Europe North')), ('westeurope', _('Europe West')), ('ukwest', _('UK West')), ('uksouth', _('UK South')), ('eastasia', _('Asia East')), ('southestasia', _('Asia Southeast')), ('australiaeast', _('Australia East')), ('australiasoutheast', _('Australia Southeast')), ('westindia', _('India West')), ('southindia', _('India South')), ('japaneast', _('Japan East')), ('japanwest', _('Japan West')), ('koreacentral', _('Korea Central')), ('koreasouth', _('Korea South')), ] AZURE_REGIONS_BLACKLIST = [] # Inventory variable name/value for determining whether a host is active # in Microsoft Azure. AZURE_ENABLED_VAR = 'instance_status' AZURE_ENABLED_VALUE = 'ReadyRole' # Filter for allowed group and host names when importing inventory from # Microsoft Azure. 
AZURE_GROUP_FILTER = r'^.+$' AZURE_HOST_FILTER = r'^.+$' AZURE_EXCLUDE_EMPTY_GROUPS = True AZURE_INSTANCE_ID_VAR = 'private_id' # -------------------------------------- # -- Microsoft Azure Resource Manager -- # -------------------------------------- AZURE_RM_GROUP_FILTER = r'^.+$' AZURE_RM_HOST_FILTER = r'^.+$' AZURE_RM_ENABLED_VAR = 'powerstate' AZURE_RM_ENABLED_VALUE = 'running' AZURE_RM_INSTANCE_ID_VAR = 'id' AZURE_RM_EXCLUDE_EMPTY_GROUPS = True # --------------------- # ----- OpenStack ----- # --------------------- OPENSTACK_ENABLED_VAR = 'status' OPENSTACK_ENABLED_VALUE = 'ACTIVE' OPENSTACK_GROUP_FILTER = r'^.+$' OPENSTACK_HOST_FILTER = r'^.+$' OPENSTACK_EXCLUDE_EMPTY_GROUPS = True OPENSTACK_INSTANCE_ID_VAR = 'openstack.id' # --------------------- # ----- Foreman ----- # --------------------- SATELLITE6_ENABLED_VAR = 'foreman.enabled' SATELLITE6_ENABLED_VALUE = 'True' SATELLITE6_GROUP_FILTER = r'^.+$' SATELLITE6_HOST_FILTER = r'^.+$' SATELLITE6_EXCLUDE_EMPTY_GROUPS = True SATELLITE6_INSTANCE_ID_VAR = 'foreman.id' SATELLITE6_GROUP_PREFIX = 'foreman_' SATELLITE6_GROUP_PATTERNS = ["{app}-{tier}-{color}", "{app}-{color}", "{app}", "{tier}"] # --------------------- # ----- CloudForms ----- # --------------------- CLOUDFORMS_ENABLED_VAR = 'power_state' CLOUDFORMS_ENABLED_VALUE = 'on' CLOUDFORMS_GROUP_FILTER = r'^.+$' CLOUDFORMS_HOST_FILTER = r'^.+$' CLOUDFORMS_EXCLUDE_EMPTY_GROUPS = True CLOUDFORMS_INSTANCE_ID_VAR = 'id' # --------------------- # ----- Custom ----- # --------------------- #CUSTOM_ENABLED_VAR = #CUSTOM_ENABLED_VALUE = CUSTOM_GROUP_FILTER = r'^.+$' CUSTOM_HOST_FILTER = r'^.+$' CUSTOM_EXCLUDE_EMPTY_GROUPS = True #CUSTOM_INSTANCE_ID_VAR = # --------------------- # ----- SCM ----- # --------------------- #SCM_ENABLED_VAR = #SCM_ENABLED_VALUE = SCM_GROUP_FILTER = r'^.+$' SCM_HOST_FILTER = r'^.+$' SCM_EXCLUDE_EMPTY_GROUPS = True #SCM_INSTANCE_ID_VAR = # --------------------- # -- Activity Stream -- # --------------------- # Defaults for enabling/disabling activity stream. # Note: These settings may be overridden by database settings. ACTIVITY_STREAM_ENABLED = True ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC = False # Internal API URL for use by inventory scripts and callback plugin. INTERNAL_API_URL = 'http://127.0.0.1:%s' % DEVSERVER_DEFAULT_PORT PERSISTENT_CALLBACK_MESSAGES = True USE_CALLBACK_QUEUE = True CALLBACK_QUEUE = "callback_tasks" FACT_QUEUE = "facts" SCHEDULER_QUEUE = "scheduler" TASK_COMMAND_PORT = 6559 SOCKETIO_NOTIFICATION_PORT = 6557 SOCKETIO_LISTEN_PORT = 8080 FACT_CACHE_PORT = 6564 # Note: This setting may be overridden by database settings. ORG_ADMINS_CAN_SEE_ALL_USERS = True # Note: This setting may be overridden by database settings. TOWER_ADMIN_ALERTS = True # Note: This setting may be overridden by database settings. TOWER_URL_BASE = "https://towerhost" INSIGHTS_URL_BASE = "https://access.redhat.com" TOWER_SETTINGS_MANIFEST = {} # Settings related to external logger configuration LOG_AGGREGATOR_ENABLED = False LOG_AGGREGATOR_TCP_TIMEOUT = 5 LOG_AGGREGATOR_VERIFY_CERT = True LOG_AGGREGATOR_LEVEL = 'INFO' # The number of retry attempts for websocket session establishment # If you're encountering issues establishing websockets in clustered Tower, # raising this value can help CHANNEL_LAYER_RECEIVE_MAX_RETRY = 10 # Logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse', }, 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue', }, 'require_debug_true_or_test': { '()': 'awx.main.utils.RequireDebugTrueOrTest', }, }, 'formatters': { 'simple': { 'format': '%(asctime)s %(levelname)-8s %(name)s %(message)s', }, 'json': { '()': 'awx.main.utils.formatters.LogstashFormatter' }, 'timed_import': { '()': 'awx.main.utils.formatters.TimeFormatter', 'format': '%(relativeSeconds)9.3f %(levelname)-8s %(message)s' } }, 'handlers': { 'console': { '()': 'logging.StreamHandler', 'level': 'DEBUG', 'filters': ['require_debug_true_or_test'], 'formatter': 'simple', }, 'null': { 'class': 'logging.NullHandler', }, 'file': { 'class': 'logging.NullHandler', 'formatter': 'simple', }, 'syslog': { 'level': 'WARNING', 'filters': ['require_debug_false'], 'class': 'logging.NullHandler', 'formatter': 'simple', }, 'http_receiver': { 'class': 'awx.main.utils.handlers.HTTPSNullHandler', 'level': 'DEBUG', 'formatter': 'json', 'host': '', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler', }, 'tower_warnings': { 'level': 'WARNING', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'tower.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'callback_receiver': { 'level': 'WARNING', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'callback_receiver.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'inventory_import': { 'level': 'DEBUG', 'class':'logging.StreamHandler', 'formatter': 'timed_import', }, 'task_system': { 'level': 'INFO', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'task_system.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'management_playbooks': { 'level': 'DEBUG', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'management_playbooks.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'fact_receiver': { 'level': 'WARNING', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'fact_receiver.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'system_tracking_migrations': { 'level': 'WARNING', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'tower_system_tracking_migrations.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, 'rbac_migrations': { 'level': 'WARNING', 'class':'logging.handlers.RotatingFileHandler', 'filters': ['require_debug_false'], 'filename': os.path.join(LOG_ROOT, 'tower_rbac_migrations.log'), 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 5, 'formatter':'simple', }, }, 'loggers': { 'django': { 'handlers': ['console'], }, 'django.request': { 'handlers': ['mail_admins', 'console', 'file', 'tower_warnings'], 'level': 'WARNING', }, 'rest_framework.request': { 'handlers': ['mail_admins', 'console', 'file', 'tower_warnings'], 'level': 'WARNING', 'propagate': False, }, 'py.warnings': { 'handlers': ['console'], }, 'awx': { 
'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, 'awx.conf': { 'handlers': ['null'], 'level': 'WARNING', }, 'awx.conf.settings': { 'handlers': ['null'], 'level': 'WARNING', }, 'awx.main': { 'handlers': ['null'] }, 'awx.main.commands.run_callback_receiver': { 'handlers': ['callback_receiver'], }, 'awx.isolated.manager.playbooks': { 'handlers': ['management_playbooks'], 'propagate': False }, 'awx.main.commands.inventory_import': { 'handlers': ['inventory_import'], 'propagate': False }, 'awx.main.tasks': { 'handlers': ['task_system'], }, 'awx.main.scheduler': { 'handlers': ['task_system'], }, 'awx.main.consumers': { 'handlers': ['null'] }, 'awx.main.access': { 'handlers': ['null'], 'propagate': False, }, 'awx.main.signals': { 'handlers': ['null'], 'propagate': False, }, 'awx.api.permissions': { 'handlers': ['null'], 'propagate': False, }, 'awx.analytics': { 'handlers': ['http_receiver'], 'level': 'INFO', 'propagate': False }, 'django_auth_ldap': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, 'social': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, 'system_tracking_migrations': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, 'rbac_migrations': { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, } } COLOR_LOGS = False
snahelou/awx
awx/settings/defaults.py
Python
apache-2.0
37,609
from __future__ import with_statement import textwrap import os import sys import pytest from os.path import join, normpath from tempfile import mkdtemp from mock import patch from tests.lib import assert_all_changes, pyversion from tests.lib.local_repos import local_repo, local_checkout from pip.utils import rmtree @pytest.mark.network def test_simple_uninstall(script): """ Test simple install and uninstall. """ result = script.pip('install', 'INITools==0.2') assert join(script.site_packages, 'initools') in result.files_created, ( sorted(result.files_created.keys()) ) # the import forces the generation of __pycache__ if the version of python # supports it script.run('python', '-c', "import initools") result2 = script.pip('uninstall', 'INITools', '-y') assert_all_changes(result, result2, [script.venv / 'build', 'cache']) def test_simple_uninstall_distutils(script): """ Test simple install and uninstall. """ script.scratch_path.join("distutils_install").mkdir() pkg_path = script.scratch_path / 'distutils_install' pkg_path.join("setup.py").write(textwrap.dedent(""" from distutils.core import setup setup( name='distutils-install', version='0.1', ) """)) result = script.run('python', pkg_path / 'setup.py', 'install') result = script.pip('list') assert "distutils-install (0.1)" in result.stdout script.pip('uninstall', 'distutils_install', '-y', expect_stderr=True) result2 = script.pip('list') assert "distutils-install (0.1)" not in result2.stdout @pytest.mark.network def test_uninstall_with_scripts(script): """ Uninstall an easy_installed package with scripts. """ result = script.run('easy_install', 'PyLogo', expect_stderr=True) easy_install_pth = script.site_packages / 'easy-install.pth' pylogo = sys.platform == 'win32' and 'pylogo' or 'PyLogo' assert(pylogo in result.files_updated[easy_install_pth].bytes) result2 = script.pip('uninstall', 'pylogo', '-y') assert_all_changes( result, result2, [script.venv / 'build', 'cache', easy_install_pth], ) @pytest.mark.network def test_uninstall_easy_install_after_import(script): """ Uninstall an easy_installed package after it's been imported """ result = script.run('easy_install', 'INITools==0.2', expect_stderr=True) # the import forces the generation of __pycache__ if the version of python # supports it script.run('python', '-c', "import initools") result2 = script.pip('uninstall', 'INITools', '-y') assert_all_changes( result, result2, [ script.venv / 'build', 'cache', script.site_packages / 'easy-install.pth', ] ) @pytest.mark.network def test_uninstall_namespace_package(script): """ Uninstall a distribution with a namespace package without clobbering the namespace and everything in it. """ result = script.pip('install', 'pd.requires==0.0.3', expect_error=True) assert join(script.site_packages, 'pd') in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip('uninstall', 'pd.find', '-y', expect_error=True) assert join(script.site_packages, 'pd') not in result2.files_deleted, ( sorted(result2.files_deleted.keys()) ) assert join(script.site_packages, 'pd', 'find') in result2.files_deleted, ( sorted(result2.files_deleted.keys()) ) def test_uninstall_overlapping_package(script, data): """ Uninstalling a distribution that adds modules to a pre-existing package should only remove those added modules, not the rest of the existing package. 
See: GitHub issue #355 (pip uninstall removes things it didn't install) """ parent_pkg = data.packages.join("parent-0.1.tar.gz") child_pkg = data.packages.join("child-0.1.tar.gz") result1 = script.pip('install', parent_pkg, expect_error=False) assert join(script.site_packages, 'parent') in result1.files_created, ( sorted(result1.files_created.keys()) ) result2 = script.pip('install', child_pkg, expect_error=False) assert join(script.site_packages, 'child') in result2.files_created, ( sorted(result2.files_created.keys()) ) assert normpath( join(script.site_packages, 'parent/plugins/child_plugin.py') ) in result2.files_created, sorted(result2.files_created.keys()) # The import forces the generation of __pycache__ if the version of python # supports it script.run('python', '-c', "import parent.plugins.child_plugin, child") result3 = script.pip('uninstall', '-y', 'child', expect_error=False) assert join(script.site_packages, 'child') in result3.files_deleted, ( sorted(result3.files_created.keys()) ) assert normpath( join(script.site_packages, 'parent/plugins/child_plugin.py') ) in result3.files_deleted, sorted(result3.files_deleted.keys()) assert join(script.site_packages, 'parent') not in result3.files_deleted, ( sorted(result3.files_deleted.keys()) ) # Additional check: uninstalling 'child' should return things to the # previous state, without unintended side effects. assert_all_changes(result2, result3, []) @pytest.mark.network def test_uninstall_console_scripts(script): """ Test uninstalling a package with more files (console_script entry points, extra directories). """ args = ['install'] args.append('discover') result = script.pip(*args, **{"expect_error": True}) assert script.bin / 'discover' + script.exe in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip('uninstall', 'discover', '-y', expect_error=True) assert_all_changes(result, result2, [script.venv / 'build', 'cache']) @pytest.mark.network def test_uninstall_easy_installed_console_scripts(script): """ Test uninstalling package with console_scripts that is easy_installed. """ args = ['easy_install'] args.append('discover') result = script.run(*args, **{"expect_stderr": True}) assert script.bin / 'discover' + script.exe in result.files_created, ( sorted(result.files_created.keys()) ) result2 = script.pip('uninstall', 'discover', '-y') assert_all_changes( result, result2, [ script.venv / 'build', 'cache', script.site_packages / 'easy-install.pth', ] ) @pytest.mark.network def test_uninstall_editable_from_svn(script, tmpdir): """ Test uninstalling an editable installation from svn. """ result = script.pip( 'install', '-e', '%s#egg=initools-dev' % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache"), ), ) result.assert_installed('INITools') result2 = script.pip('uninstall', '-y', 'initools') assert (script.venv / 'src' / 'initools' in result2.files_after) assert_all_changes( result, result2, [ script.venv / 'src', script.venv / 'build', script.site_packages / 'easy-install.pth' ], ) @pytest.mark.network def test_uninstall_editable_with_source_outside_venv(script, tmpdir): """ Test uninstalling editable install from existing source outside the venv. 
""" cache_dir = tmpdir.join("cache") try: temp = mkdtemp() tmpdir = join(temp, 'pip-test-package') _test_uninstall_editable_with_source_outside_venv( script, tmpdir, cache_dir, ) finally: rmtree(temp) def _test_uninstall_editable_with_source_outside_venv( script, tmpdir, cache_dir): result = script.run( 'git', 'clone', local_repo( 'git+git://github.com/pypa/pip-test-package', cache_dir, ), tmpdir, expect_stderr=True, ) result2 = script.pip('install', '-e', tmpdir) assert join( script.site_packages, 'pip-test-package.egg-link' ) in result2.files_created, list(result2.files_created.keys()) result3 = script.pip('uninstall', '-y', 'pip-test-package', expect_error=True) assert_all_changes( result, result3, [script.venv / 'build', script.site_packages / 'easy-install.pth'], ) @pytest.mark.network def test_uninstall_from_reqs_file(script, tmpdir): """ Test uninstall from a requirements file. """ script.scratch_path.join("test-req.txt").write( textwrap.dedent(""" -e %s#egg=initools-dev # and something else to test out: PyLogo<0.4 """) % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache") ) ) result = script.pip('install', '-r', 'test-req.txt') script.scratch_path.join("test-req.txt").write( textwrap.dedent(""" # -f, -i, and --extra-index-url should all be ignored by uninstall -f http://www.example.com -i http://www.example.com --extra-index-url http://www.example.com -e %s#egg=initools-dev # and something else to test out: PyLogo<0.4 """) % local_checkout( 'svn+http://svn.colorstudy.com/INITools/trunk', tmpdir.join("cache") ) ) result2 = script.pip('uninstall', '-r', 'test-req.txt', '-y') assert_all_changes( result, result2, [ script.venv / 'build', script.venv / 'src', script.scratch / 'test-req.txt', script.site_packages / 'easy-install.pth', ], ) def test_uninstall_as_egg(script, data): """ Test uninstall package installed as egg. """ to_install = data.packages.join("FSPkg") result = script.pip('install', to_install, '--egg', expect_error=False) fspkg_folder = script.site_packages / 'fspkg' egg_folder = script.site_packages / 'FSPkg-0.1.dev0-py%s.egg' % pyversion assert fspkg_folder not in result.files_created, str(result.stdout) assert egg_folder in result.files_created, str(result) result2 = script.pip('uninstall', 'FSPkg', '-y') assert_all_changes( result, result2, [ script.venv / 'build', 'cache', script.site_packages / 'easy-install.pth', ], ) def test_uninstallpathset_no_paths(caplog): """ Test UninstallPathSet logs notification when there are no paths to uninstall """ from pip.req.req_uninstall import UninstallPathSet from pkg_resources import get_distribution test_dist = get_distribution('pip') # ensure that the distribution is "local" with patch("pip.req.req_uninstall.dist_is_local") as mock_dist_is_local: mock_dist_is_local.return_value = True uninstall_set = UninstallPathSet(test_dist) uninstall_set.remove() # with no files added to set assert ( "Can't uninstall 'pip'. No files were found to uninstall." 
in caplog.text() ) def test_uninstallpathset_non_local(caplog): """ Test UninstallPathSet logs notification and returns (with no exception) when dist is non-local """ nonlocal_path = os.path.abspath("/nonlocal") from pip.req.req_uninstall import UninstallPathSet from pkg_resources import get_distribution test_dist = get_distribution('pip') test_dist.location = nonlocal_path # ensure that the distribution is "non-local" # setting location isn't enough, due to egg-link file checking for # develop-installs with patch("pip.req.req_uninstall.dist_is_local") as mock_dist_is_local: mock_dist_is_local.return_value = False uninstall_set = UninstallPathSet(test_dist) # with no files added to set; which is the case when trying to remove # non-local dists uninstall_set.remove() assert ( "Not uninstalling pip at %s, outside environment %s" % (nonlocal_path, sys.prefix) in caplog.text() ) def test_uninstall_wheel(script, data): """ Test uninstalling a wheel """ package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl") result = script.pip('install', package, '--no-index') dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info' assert dist_info_folder in result.files_created result2 = script.pip('uninstall', 'simple.dist', '-y') assert_all_changes(result, result2, []) def test_uninstall_setuptools_develop_install(script, data): """Try uninstall after setup.py develop followed of setup.py install""" pkg_path = data.packages.join("FSPkg") script.run('python', 'setup.py', 'develop', expect_stderr=True, cwd=pkg_path) script.run('python', 'setup.py', 'install', expect_stderr=True, cwd=pkg_path) list_result = script.pip('list') assert "FSPkg (0.1.dev0)" in list_result.stdout # Uninstall both develop and install uninstall = script.pip('uninstall', 'FSPkg', '-y') assert any(filename.endswith('.egg') for filename in uninstall.files_deleted.keys()) uninstall2 = script.pip('uninstall', 'FSPkg', '-y') assert join( script.site_packages, 'FSPkg.egg-link' ) in uninstall2.files_deleted, list(uninstall2.files_deleted.keys()) list_result2 = script.pip('list') assert "FSPkg" not in list_result2.stdout
qbdsoft/pip
tests/functional/test_uninstall.py
Python
mit
13,733
import sys sys.path.append("../") from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn import decomposition import jieba import time import glob import sys import os import random if len(sys.argv)<2: print "usage: extract_topic.py directory [n_topic] [n_top_words]" sys.exit(0) n_topic = 10 n_top_words = 25 if len(sys.argv)>2: n_topic = int(sys.argv[2]) if len(sys.argv)>3: n_top_words = int(sys.argv[3]) count_vect = CountVectorizer() docs = [] pattern = os.path.join(sys.argv[1],"*.txt") print "read "+pattern for f_name in glob.glob(pattern): with open(f_name) as f: print "read file:", f_name for line in f: #one line as a document words = " ".join(jieba.cut(line)) docs.append(words) random.shuffle(docs) print "read done." print "transform" counts = count_vect.fit_transform(docs) tfidf = TfidfTransformer().fit_transform(counts) print tfidf.shape t0 = time.time() print "training..." nmf = decomposition.NMF(n_components=n_topic).fit(tfidf) print("done in %0.3fs." % (time.time() - t0)) # Inverse the vectorizer vocabulary to be able to map feature indices back to words feature_names = count_vect.get_feature_names() for topic_idx, topic in enumerate(nmf.components_): print("Topic #%d:" % topic_idx) print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) print("")
beni55/jieba
test/extract_topic.py
Python
mit
1,456
# Implementation of Extensible Dependency Grammar, as described in # Debusmann, R. (2007). Extensible Dependency Grammar: A modular # grammar formalism based on multigraph description. PhD Dissertation: # Universität des Saarlandes. # ######################################################################## # # This file is part of the HLTDI L^3 project # for parsing, generation, and translation within the # framework of Extensible Dependency Grammar. # # Copyright (C) 2011, 2011, 2012, 2013 # by the HLTDI L^3 Team <[email protected]> # # This program is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation, either version 3 of # the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # 2010.10.27 # -- Created graph.py for Graphs and Multigraphs. # Methods for creating daughter and agrs dicts and pretty printing. # 2010.10.31 # -- When Graph language is output language for translation and Graph # dimension is language's surface dimension, Graph assembles the # output sentence. For each node, it either spits out the 'word' # attribute, or it finds the pos, root, and agr dictionary and # generates the wordform. # -- Sentence generation moved to MultiGraph because it also needs # output positions, which are in a different dimension (Graph) than # the agrs that are needed for wordform generation. # 2011.02.02 # -- Semantic node labels get output in Graph.pprint() for Semantics # dimension and in Multigraph.io(). # 2011.02.06 # -- Graph.pprint() prints out the right surface form for target languages. # 2011.02.22 # -- Multigraph.var() a handy way to examine variables within a given solution (dstore). # 2011.03.10 # -- Deleted and empty nodes are marked as such; deleted nodes can be skipped in io(). # 2011.04 # -- Graphs are drawn by default. # 2013.09.04 # -- Agreement feature-values pretty-printed. # Default feature value; ignore features with this value when printing out graph from .dimension import DFLT_FV import sys DELCHAR = '*' WORDSEP = 11 LINE = 100 class Graph(dict): """The graph for a sentence on a single dimension. A dict of form {node: {label: {daughter_nodes}}}.""" def __init__(self, dimension, language=None, nodes=None, dstore=None, entries=None, is_output=False): """A graph needs a set of nodes, a domain store, and a language to get the values of variables it needs to initialize itself. A graph is a dict with nodes as keys and a label-key dict of daughters as values. 
Mothers are saved in an attribute dict.""" self.dimension = dimension self.abbrev = dimension.abbrev self.language = language self.nodes = nodes self.dstore = dstore self.entries = entries self.is_output = is_output self.make_forms, self.make_order = False, False if is_output: self.make_forms = self.abbrev == self.language.surface_dim self.make_order = self.abbrev == self.language.order_dim # Agreement features self.agrs = {} # Feature names to be ignored in display self.hide_feats = language.hide_feats # Word forms if this is an output language self.forms = {} # Positions if this is an output language self.positions = {} # List of deleted (zero) nodes self.del_nodes = [] # List of root nodes self.root_nodes = [] # List of subordinate nodes self.sub_nodes = [] # Dict of mothers self.mothers = {} if nodes and dstore: self.create() def __repr__(self): return "<G {}/{} {}>".format(self.abbrev, self.dstore.name, self.dstore.level) def pprint(self, show_del=True, show_agrs=True): print('Dimension {}'.format(self.dimension.abbrev)) for node, arcs in self.items(): if show_del or node not in self.del_nodes: print(' {}'.format(node), end='') if self.forms: print(' / {}'.format(self.forms[node]), end='') print() if arcs: # arcs dict could be empty print(' Daughters:: ', end='') for label, daughters in arcs.items(): print('{}:{} '.format(label, daughters), end='') print() if node in self.agrs: print(' Agrs:: ', end='') for label, value in self.agrs[node].items(): if value: # Ignore 0 values print('{}:{} '.format(label, value), end='') print() if self.positions: print(' Position:: {}'.format(self.positions[node])) def create(self): """Enter dictionaries for each node that has daughters and find the value of agrs.""" abbrev = self.dimension.abbrev lang_abbrev = self.language.abbrev if self.language else '' for node in self.nodes: self[node] = {} nodedimD = node.dims[self.dimension] # Get the daughters of this node for each arc label for label in self.dimension.labels: # Daughter variable for this arc label var = nodedimD['outvars'][label][0] # Value of the daughter variable: indices of daughters daugh_indices = var.get_value(dstore=self.dstore) if daugh_indices: # Use the daughter nodes rather than indices daughters = [self.nodes[index] for index in daugh_indices] self[node][label] = set(daughters) for daughter in daughters: self.add_mother(daughter, node, label) # If the label is del, record that the daughters are deleted if label == 'del': self.del_nodes.extend(daughters) elif label == 'root': self.root_nodes.extend(daughters) elif label == 'sub': self.sub_nodes.extend(daughters) agrs = nodedimD.get('agrvars') if agrs: for label, var in agrs.items(): value = var.get_value(dstore=self.dstore) if value not in [()]: #, DFLT_FV]: # Don't record empty or default tuple values if node not in self.agrs: self.agrs[node] = {} self.agrs[node][label] = value # print('agrs: node {}, label {}, value {}'.format(node, label, self.agrs[node][label])) # If this is an output language, get information relevant for the # output form, including morphological features if self.make_forms or lang_abbrev == 'sem': # Add a form to the forms list for this word entry = self.entries[node] names = entry.names.get(lang_abbrev, {}) agrs = self.agrs.get(node) # if agrs: # print('Agrs for node', agrs) if 'word' in names: w = names['word'] # print("Setting form for {}, names {}, name {}, word {}".format(self, names, names.get('name'), w)) # If the translation is unknown, use ?name if 'name' in names and w == '*': self.forms[node] =
'?' + names['name'] # print('Set form for {} to {}'.format(node, self.forms[node])) else: # Otherwise use word self.forms[node] = w elif 'label' in names: self.forms[node] = names['label'] elif lang_abbrev == 'sem': # Otherwise for semantics, use the value of 'lexeme' or 'gram' self.forms[node] = names.get('lexeme') or names.get('gram') elif self.language.morph_processing and 'root' in names: # The output word has to be generated from its root and agrs if 'root' not in names or 'pos' not in names: print('Node {} has only names {}'.format(node, names)) root = names['root'] pos = names['pos'] self.forms[node] = (pos, root, agrs) # print('forms for {}: {}'.format(node, self.forms[node])) elif 'lexeme' in names: form = names['lexeme'] # print('Form for node {}: {}'.format(node, form)) # Words with form 0 don't get generated if agrs and form != '0': form = form + str(agrs) self.forms[node] = form else: # Otherwise use 'word' attribute of the node self.forms[node] = node.word # Order dimension if self.make_order: # Position variable for this node posvar = nodedimD.get('posvar') # Its value is a singleton set pos = posvar.get_value(dstore=self.dstore) if not pos: # Something wrong: the node must have a position (value is empty set) print('Warning: pos var {} has no value'.format(posvar)) print(posvar, posvar.dstores) self.positions[node] = list(pos)[0] def draw_sentence(self, word_sep=WORDSEP, file=sys.stdout): """Write the words in the sentence, spaced appropriately.""" # nodes = list(self.keys()) # nodes.sort(key=lambda node: node.index) translate = self.forms for node in self.nodes: if translate: string = self.node_string(node, string=self.forms[node], word_sep=word_sep) else: string = self.node_string(node, word_sep=word_sep) print(string.ljust(word_sep), end='', file=file) print(file=file) def add_mother(self, daughter, mother, arc): """Record that mother is a mother of daughter along an arc with label arc.""" if daughter not in self.mothers: self.mothers[daughter] = {} if arc not in self.mothers[daughter]: self.mothers[daughter][arc] = set() self.mothers[daughter][arc].add(mother) def draw_roots_dels(self, word_sep=WORDSEP, file=sys.stdout): """Write deleted nodes and roots in word positions.""" root_del_string = '' for index, node in enumerate(self.nodes): if node in self.del_nodes: root_del_string += 'X'.ljust(word_sep) elif node in self.root_nodes: root_del_string += 'ROOT'.ljust(word_sep) elif node in self.sub_nodes: root_del_string += 'SUB'.ljust(word_sep) else: root_del_string += ' ' * word_sep print(root_del_string, file=file) def node_string(self, node, string='', word_sep=WORDSEP): word = string or node.word if word == 'zero': return '0' if len(word) > word_sep - 2: return word[:word_sep-2] + '.' 
return word def draw_order(self, word_sep=WORDSEP, file=sys.stdout): """Write the positions of the words for an output sentence.""" position_string = '' for index, node in enumerate(self.nodes): position_string += str(self.positions[node]).ljust(word_sep) print(position_string, file=file) def draw_agrs(self, word_sep=WORDSEP, infeats=None, file=sys.stdout): """Show selected agreement features and values for all nodes.""" agrs = [list(self.agrs.get(node, {}).items()) for node in self.nodes] while any(agrs): s = '' for agr in agrs: any_fv = False while agr and not any_fv: f, v = agr.pop(0) # Don't display features with negative values if len(v) == 1 and list(v)[0] < 0: continue if (infeats and f in infeats) or (f not in self.hide_feats): any_fv = True s += "{}".format(self.agr_string(f, v)).ljust(word_sep) if not any_fv: s += "".ljust(word_sep) if s: print(s, file=file) def agr_string(self, feat, value): """Simplify value int tuple to string of ints.""" vs = '' for v in value: vs += str(v) return "{}={}".format(feat, vs) def draw(self, word_sep=WORDSEP, show_agrs=True, show_order=True, file=sys.stdout): self.draw_sentence(word_sep=word_sep, file=file) if show_agrs: self.draw_agrs(word_sep=word_sep, file=file) if show_order and self.positions: self.draw_order(word_sep=word_sep, file=file) self.draw_roots_dels(word_sep=word_sep, file=file) for node, arcs in self.items(): start = node.index if arcs: for label, daughters in arcs.items(): if label not in ['del', 'root', 'sub']: for daughter in daughters: end = daughter.index self.draw_arc(start, end, label, word_sep, file=file) def draw_arc(self, start, end, label, word_sep=WORDSEP, file=sys.stdout): pos0 = start * word_sep pos1 = end * word_sep pos0 = pos0 pos1 = pos1 arrow_length = abs(pos1 - pos0) shaft = label.center(arrow_length-1, '-') if start < end: # Draw from start to end print('{}{}>'.format(' ' * pos0, shaft), file=file) else: print('{}<{}'.format(' ' * pos1, shaft), file=file) class Multigraph(dict): """The multigraph for a sentence on all dimensions. 
A dict of form {dim: graph}.""" def __init__(self, problem, arc_dims, if_dims, dstore, morf_verbosity=0): self.problem = problem self.sentence = problem.sentence self.sent_string = problem.name self.arc_dims = arc_dims self.if_dims = if_dims self.dstore = dstore self.nodes = problem.nodes + problem.empty_nodes self.language = problem.language # Make a dictionary for each output language: 'formgraph', 'posgraph', 'sentence' self.languages = {lang: {'sentence': [''] * len(self.sentence)} for lang in problem.languages} # Semantic "language" if there is one self.semantics = problem.semantics # For the representation of the semantic output self.semrep = [] # To hold entries of nodes self.entries = {} if self.language and self.nodes and self.dstore: self.create(morf_verbosity=morf_verbosity) def __repr__(self): ds = self.dstore level = ds.level name = '' if level > 0: name = ds.name return "<MG {}: {}>".format(name, self.problem.name) def var(self, vname): """Returns the variable dict for the variable with name vname in this solution's dstore.""" variable = self.problem.varsD.get(vname) if variable: return variable.get_value(dstore=self.dstore) def io(self, show_del=False, show_input=False, semantics=False): '''Show input and output sentences and semantics, if specified.''' if show_input: print('Input ({}): {}'.format(self.language.name, self.sent_string)) return self.sent_string if not self.languages and (self.semantics and semantics): output = self.semrep if not show_del: output = [form for form in output if form and form[0] != DELCHAR] joined = ' '.join(output) print('Semantics: {}'.format(joined)) return joined for language, attribs in self.languages.items(): output = attribs['sentence'] if not show_del: output = [form for form in output if form and form[0] != DELCHAR] joined = ' '.join(output) print('Output ({}): {}'.format(language.name, joined)) return joined def display(self, draw=True, dims=None, line=LINE, show_del=True, show_agrs=True, show_order=True, file=sys.stdout): """Pretty-print or draw the multigraph, restricting the graphs to those in dims (a list of dimension abbreviations) if dims is not None.""" self.multigraph_header(line=line, file=file) self.io(show_del=show_del) if self.groups: print('Groups') print(self.groups) # for group in self.groups.values(): # print(' ', group) for dim in self.arc_dims: abbrev = dim.abbrev if not dims or abbrev in dims: self.graph_header(abbrev, line=line, file=file) graph = self[dim.abbrev] if draw: graph.draw(show_agrs=show_agrs, file=file) else: graph.pprint(show_del=show_del) @staticmethod def d(multigraphs, dims=None, draw=True, line=LINE, file=sys.stdout): """Draw each of a collection of multigraphs.""" if not isinstance(multigraphs, list): multigraphs = [multigraphs] if dims and not isinstance(dims, list): dims = [dims] for graph in multigraphs: print(file=file) graph.display(draw=draw, dims=dims, line=line, file=file) def multigraph_header(self, line=LINE, file=sys.stdout): print(' SOLUTION {} '.format(self.__repr__()).center(line, '='), file=file) def graph_header(self, abbrev, line=LINE, file=sys.stdout): print(' {} '.format(abbrev).center(line, '_'), file=file) def get_groups(self): """Multiword expressions.""" groups = {} for node, entry in self.entries.items(): # print('Getting groups for {}/{}; {}'.format(node, entry, entry.gid)) gid = entry.gid if gid: groups[gid] = groups.get(gid, []) + [node] return groups def get_semgraph(self): """The Graph for Semantics.""" return self.get('sem') def create(self, morf_verbosity=0): # Set the 
node entries self.entries = {node: node.entries[node.lexvar.get_value(dstore=self.dstore)] for node in self.nodes} # Find groups (this should be by language) self.groups = self.get_groups() # Create the dimension graphs for dimension in self.arc_dims: language = dimension.language graph = Graph(dimension, language, self.nodes, self.dstore, entries=self.entries, is_output=(language and (self.language != language))) if graph.make_forms: self.languages[language]['formgraph'] = graph if graph.make_order: self.languages[language]['posgraph'] = graph self[dimension.abbrev] = graph # Create the output sentence(s) if there are output languages if self.languages: for language, attribs in self.languages.items(): sentence = attribs['sentence'] ordergraph = attribs['posgraph'] formgraph = attribs['formgraph'] del_nodes = formgraph.del_nodes for node in self.nodes: position = ordergraph.positions[node] # Sentence may not be long enough because target # requires more words than source if len(sentence) <= position: sentence.extend([''] * (position - len(sentence) + 1)) # sentence.append('') form = formgraph.forms[node] if not isinstance(form, str): # form is a tuple specifying inputs to morphological generation pos, root, dct = form if morf_verbosity: print('Generating wordform: pos', pos, 'root', root, 'dct', dct, 'formgraph', formgraph, 'form', form) wordform = language.gen_word(pos, root, dct, formgraph[node], formgraph.mothers.get(node), formgraph.forms, del_nodes) if wordform: sentence[position] = wordform # Replace the form with the generated wordform formgraph.forms[node] = wordform else: # Use the root if it's impossible to generate the wordform # sentence[position] = str(form) sentence[position] = root else: sentence[position] = form # Do this later because a word may have been deleted in morphological generation for node in self.nodes: position = ordergraph.positions[node] if node in del_nodes: # Nodes with no content; deleted in formgraph sentence[position] = DELCHAR + sentence[position] elif node in ordergraph.del_nodes: # Contentful nodes with no output position sentence[position] = '(' + sentence[position] + ')' # Create a semantic representation if self.semantics: # Create a representation for the semantic output semgraph = self.get_semgraph() for node in self.nodes: name = self.get_node_name('sem', node) # Mark deleted nodes if node in semgraph.del_nodes: name = DELCHAR + name self.semrep.append(name) def get_node_name(self, lang_abbrev, node): entry = self.entries.get(node) if entry: names = entry.names.get(lang_abbrev) if names: return names.get('word') or names.get('lexeme') or names.get('gram') or entry.get_name() return ''
LowResourceLanguages/hltdi-l3
l3xdg/graph.py
Python
gpl-3.0
23,447
# Copyright (C) 2013-2015 MetaMorph Software, Inc # Permission is hereby granted, free of charge, to any person obtaining a # copy of this data, including any software or models in source or binary # form, as well as any drawings, specifications, and documentation # (collectively "the Data"), to deal in the Data without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Data, and to # permit persons to whom the Data is furnished to do so, subject to the # following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Data. # THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA. # ======================= # This version of the META tools is a fork of an original version produced # by Vanderbilt University's Institute for Software Integrated Systems (ISIS). # Their license statement: # Copyright (C) 2011-2014 Vanderbilt University # Developed with the sponsorship of the Defense Advanced Research Projects # Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights # as defined in DFARS 252.227-7013. # Permission is hereby granted, free of charge, to any person obtaining a # copy of this data, including any software or models in source or binary # form, as well as any drawings, specifications, and documentation # (collectively "the Data"), to deal in the Data without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Data, and to # permit persons to whom the Data is furnished to do so, subject to the # following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Data. # THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA. 
#!/home/ubuntu/requests/bin/python import jenkinsapi from jenkinsapi.jenkins import Jenkins import requests import time import calendar import datetime old_threshold = datetime.timedelta(hours=24) def munge_url(url): return url.replace("10.2.204.106:8080", "localhost:9999") import jenkinsapi.jenkinsbase def _poll(self): url = self.python_api_url(self.baseurl) url = munge_url(url) #print url return self.get_data(url) jenkinsapi.jenkinsbase.JenkinsBase._poll = _poll import jenkinsapi.build def _poll(self): #For builds we need more information for downstream and upstream builds #so we override the poll to get at the extra data for build objects url = self.python_api_url(self.baseurl) + '?depth=2' url = munge_url(url) #print url return self.get_data(url) jenkinsapi.build.Build._poll = _poll j = Jenkins('http://localhost:9999') execs = [] for (name, node) in j.get_nodes().iteritems(): execs.extend((exec_ for exec_ in j.get_executors(name).__iter__() if not exec_.is_idle())) for exec_ in execs: exec_url = exec_.get_current_executable()['url'] timestamp = requests.get(munge_url(exec_url + 'api/json')).json()['timestamp'] delta = datetime.timedelta(seconds=calendar.timegm(time.gmtime()) - timestamp/1000) if delta > old_threshold: print exec_url + " " + str(delta) #import pytz #naive_timestamp = datetime.datetime(*time.gmtime(timestamp / 1000.0)[:6]) #pytz.utc.localize(naive_timestamp) #for (job_name, job) in j.get_jobs(): # if job.is_running(): # print job_name
pombredanne/metamorphosys-desktop
metamorphosys/META/src/JobManager/get_longrunning_jobs.py
Python
mit
4,502
import os import pytest from py.path import local from socceraction.data import opta as opta from socceraction.data.opta import ( OptaCompetitionSchema, OptaEventSchema, OptaGameSchema, OptaPlayerSchema, OptaTeamSchema, ) def test_create_opta_json_loader(tmpdir: local) -> None: """It should be able to parse F1, F9 and F24 JSON feeds.""" feeds = { "f1": "f1-{competition_id}-{season_id}-{game_id}.json", "f9": "f9-{competition_id}-{season_id}-{game_id}.json", "f24": "f24-{competition_id}-{season_id}-{game_id}.json", } loader = opta.OptaLoader(root=str(tmpdir), parser="json", feeds=feeds) assert loader.parsers == { "f1": opta.parsers.F1JSONParser, "f9": opta.parsers.F9JSONParser, "f24": opta.parsers.F24JSONParser, } def test_create_opta_xml_loader(tmpdir: local) -> None: """It should be able to parse F7 and F24 XML feeds.""" feeds = { "f7": "f7-{competition_id}-{season_id}-{game_id}.json", "f24": "f24-{competition_id}-{season_id}-{game_id}.json", } loader = opta.OptaLoader(root=str(tmpdir), parser="xml", feeds=feeds) assert loader.parsers == { "f7": opta.parsers.F7XMLParser, "f24": opta.parsers.F24XMLParser, } def test_create_statsperform_loader(tmpdir: local) -> None: """It should be able to parse MA1 and MA3 StatsPerform feeds.""" feeds = { "ma1": "ma1-{competition_id}-{season_id}-{game_id}.json", "ma3": "ma3-{competition_id}-{season_id}-{game_id}.json", } loader = opta.OptaLoader(root=str(tmpdir), parser="statsperform", feeds=feeds) assert loader.parsers == { "ma1": opta.parsers.MA1JSONParser, "ma3": opta.parsers.MA3JSONParser, } def test_create_whoscored_loader(tmpdir: local) -> None: """It should be able to parse WhoScored feeds.""" feeds = { "whoscored": "{competition_id}-{season_id}-{game_id}.json", } loader = opta.OptaLoader(root=str(tmpdir), parser="whoscored", feeds=feeds) assert loader.parsers == { "whoscored": opta.parsers.WhoScoredParser, } def test_create_custom_loader(tmpdir: local) -> None: """It should support a custom feed and parser.""" feeds = { "myfeed": "{competition_id}-{season_id}-{game_id}.json", } parser = { "myfeed": opta.parsers.base.OptaParser, } loader = opta.OptaLoader(root=str(tmpdir), parser=parser, feeds=feeds) assert loader.parsers == { "myfeed": opta.parsers.base.OptaParser, } def test_create_loader_with_unsupported_feed(tmpdir: local) -> None: """It should warn if a feed is not supported.""" feeds = { "f0": "f0-{competition_id}-{season_id}-{game_id}.json", } with pytest.warns( UserWarning, match="No parser available for f0 feeds. This feed is ignored."
): loader = opta.OptaLoader(root=str(tmpdir), parser="json", feeds=feeds) assert loader.parsers == {} def test_create_invalid_loader(tmpdir: local) -> None: """It should raise an error if the parser is not supported.""" feeds = { "myfeed": "{competition_id}-{season_id}-{game_id}.json", } with pytest.raises(ValueError): opta.OptaLoader(root=str(tmpdir), parser="wrong", feeds=feeds) def test_deepupdate() -> None: """It should update a dict with another dict.""" # list t1 = {'name': 'ferry', 'hobbies': ['programming', 'sci-fi']} opta.loader._deepupdate(t1, {'hobbies': ['gaming'], 'jobs': ['student']}) assert t1 == { 'name': 'ferry', 'hobbies': ['programming', 'sci-fi', 'gaming'], 'jobs': ['student'], } # set t2 = {'name': 'ferry', 'hobbies': {'programming', 'sci-fi'}} opta.loader._deepupdate(t2, {'hobbies': {'gaming'}, 'jobs': {'student'}}) assert t2 == { 'name': 'ferry', 'hobbies': {'programming', 'sci-fi', 'gaming'}, 'jobs': {'student'}, } # dict t3 = {'name': 'ferry', 'hobbies': {'programming': True, 'sci-fi': True}} opta.loader._deepupdate(t3, {'hobbies': {'gaming': True}}) assert t3 == { 'name': 'ferry', 'hobbies': {'programming': True, 'sci-fi': True, 'gaming': True}, } # value t4 = {'name': 'ferry', 'hobby': 'programming'} opta.loader._deepupdate(t4, {'hobby': 'gaming'}) assert t4 == {'name': 'ferry', 'hobby': 'gaming'} class TestJSONOptaLoader: def setup_method(self) -> None: data_dir = os.path.join(os.path.dirname(__file__), os.pardir, "datasets", "opta") self.loader = opta.OptaLoader( root=data_dir, parser="json", feeds={ "f1": "tournament-{season_id}-{competition_id}.json", "f9": "match-{season_id}-{competition_id}-{game_id}.json", "f24": "match-{season_id}-{competition_id}-{game_id}.json", }, ) def test_competitions(self) -> None: df_competitions = self.loader.competitions() assert len(df_competitions) > 0 OptaCompetitionSchema.validate(df_competitions) def test_games(self) -> None: df_games = self.loader.games(8, 2017) assert len(df_games) == 1 OptaGameSchema.validate(df_games) def test_teams(self) -> None: df_teams = self.loader.teams(918893) assert len(df_teams) == 2 OptaTeamSchema.validate(df_teams) def test_players(self) -> None: df_players = self.loader.players(918893) assert len(df_players) == 27 OptaPlayerSchema.validate(df_players) def test_events(self) -> None: df_events = self.loader.events(918893) assert len(df_events) > 0 OptaEventSchema.validate(df_events) class TestXMLOptaLoader: def setup_method(self) -> None: data_dir = os.path.join(os.path.dirname(__file__), os.pardir, "datasets", "opta") self.loader = opta.OptaLoader( root=data_dir, parser="xml", feeds={ "f7": "f7-{competition_id}-{season_id}-{game_id}-matchresults.xml", "f24": "f24-{competition_id}-{season_id}-{game_id}-eventdetails.xml", }, ) def test_competitions(self) -> None: df_competitions = self.loader.competitions() assert len(df_competitions) > 0 OptaCompetitionSchema.validate(df_competitions) def test_games(self) -> None: df_games = self.loader.games(23, 2018) assert len(df_games) == 1 OptaGameSchema.validate(df_games) def test_teams(self) -> None: df_teams = self.loader.teams(1009316) assert len(df_teams) == 2 OptaTeamSchema.validate(df_teams) def test_players(self) -> None: df_players = self.loader.players(1009316) assert len(df_players) == 36 OptaPlayerSchema.validate(df_players) def test_events(self) -> None: df_events = self.loader.events(1009316) assert len(df_events) > 0 OptaEventSchema.validate(df_events) class TestWhoscoredLoader: def setup_method(self) -> None: data_dir = 
os.path.join(os.path.dirname(__file__), os.pardir, "datasets", "whoscored") self.loader = opta.OptaLoader( root=data_dir, parser="whoscored", feeds={"whoscored": "{game_id}.json"}, ) # def test_competitions(self) -> None: # df_competitions = self.loader.competitions() # assert len(df_competitions) == 0 def test_games(self) -> None: df_games = self.loader.games(23, 2018) assert len(df_games) == 1 OptaGameSchema.validate(df_games) def test_teams(self) -> None: df_teams = self.loader.teams(1005916) assert len(df_teams) == 2 OptaTeamSchema.validate(df_teams) def test_players(self) -> None: df_players = self.loader.players(1005916) assert len(df_players) == 44 OptaPlayerSchema.validate(df_players) def test_events(self) -> None: df_events = self.loader.events(1005916) assert len(df_events) > 0 OptaEventSchema.validate(df_events)
ML-KULeuven/socceraction
tests/data/test_load_opta.py
Python
mit
8,173
from __future__ import unicode_literals import json from flask import Flask, request, abort, render_template from hazm import Normalizer from InformationSearcher import InformationSearcher app = Flask(__name__) normalizer = Normalizer() information_searcher = InformationSearcher('../resources/index/') @app.route('/search', methods=['POST']) def search(): if 'argument1' not in request.form or 'argument2' not in request.form or 'relation' not in request.form: abort(400) query = map(normalizer.normalize, (request.form['argument1'], request.form['argument2'], request.form['relation'])) results = information_searcher.search(*query) return json.dumps({ 'htmls': [information.html() for information in results['informations']], 'hits': results['hits'], }, ensure_ascii=False) @app.route('/') def main(): return render_template('index.html') if __name__ == '__main__': app.run(host='0.0.0.0', debug=True)
sobhe/baaz
web/main.py
Python
mit
927
#!/usr/bin/env python # Copyright 2017-present Open Networking Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from Queue import Queue import grpc from google.protobuf.empty_pb2 import Empty from twisted.internet import reactor from twisted.internet import threads from twisted.internet.defer import Deferred, inlineCallbacks, DeferredQueue, \ returnValue from common.utils.asleep import asleep from streaming_pb2 import ExperimentalServiceStub, Echo, Packet t0 = time.time() def pr(s): print '%lf %s' % (time.time() - t0, s) class ClientServices(object): def async_receive_stream(self, func, *args, **kw): queue = DeferredQueue() def _execute(): for result in func(*args, **kw): reactor.callFromThread(queue.put, result) _ = threads.deferToThread(_execute) while 1: yield queue.get() @inlineCallbacks def echo_loop(self, stub, prefix='', interval=1.0): """Send an echo message and print its return value""" seq = 0 while 1: msg = 'ECHO%05d' % seq pr('{}sending echo {}'.format(prefix, msg)) request = Echo(msg=msg, delay=interval) response = yield threads.deferToThread(stub.GetEcho, request) pr('{} got echo {}'.format(prefix, response.msg)) seq += 1 yield asleep(interval) @inlineCallbacks def receive_async_events(self, stub): e = Empty() for next in self.async_receive_stream(stub.ReceiveStreamedEvents, e): event = yield next if event.seq % 100 == 0: pr('event received: %s %s %s' % ( event.seq, event.type, event.details)) @inlineCallbacks def send_packet_stream(self, stub, interval): queue = Queue() @inlineCallbacks def get_next_from_queue(): packet = yield queue.get() returnValue(packet) def packet_generator(): while 1: packet = queue.get(block=True) yield packet def stream(stub): """This is executed on its own thread""" generator = packet_generator() result = stub.SendPackets(generator) print 'Got this after sending packets:', result, type(result) return result reactor.callInThread(stream, stub) while 1: len = queue.qsize() if len < 100: packet = Packet(source=42, content='beefstew') queue.put(packet) yield asleep(interval) if __name__ == '__main__': client_services = ClientServices() channel = grpc.insecure_channel('localhost:50050') stub = ExperimentalServiceStub(channel) reactor.callLater(0, client_services.echo_loop, stub, '', 0.2) reactor.callLater(0, client_services.echo_loop, stub, 40*' ', 2) reactor.callLater(0, client_services.receive_async_events, stub) reactor.callLater(0, client_services.send_packet_stream, stub, 0.0000001) reactor.run()
opencord/voltha
experiments/streaming_client.py
Python
apache-2.0
3,594
#!/usr/bin/env python ref_file = open('../../../result/mirage_output_miR-124_vs_RefSeq_NM_2015-07-20_miR-124_overexpression.result','r') input_file = open('../../../result/gene_exp_miR-124_overexpression_RefSeq_Rep_isoforms.diff','r') output_file = open('../../../result/gene_exp_miR-124_overexpression_RefSeq_Rep_isoforms_with_seed_type_wobbles.diff','w') ref_dict = {} for line in ref_file: line = line.rstrip() data = line.split("\t") if data[0] == 'gr_id': #print(line,end="\n",file=output_file) continue gene_symbol = data[1] seed_type = data[22] GU_wobble = data[23] seed_type2 = seed_type + '|' + GU_wobble if not gene_symbol in ref_dict: ref_dict[gene_symbol] = [seed_type2] else: ref_dict[gene_symbol].append(seed_type2) #print(ref_dict['ZSWIM2']) for line in input_file: line = line.rstrip() data = line.split("\t") if data[0] == 'gr_id': print(line,'seed_type', sep="\t",end="\n",file=output_file) continue #print(data[0]) gene_symbol = data[1] if not gene_symbol in ref_dict: print(line,'NA', sep="\t",end="\n",file=output_file) continue else: seed_match_list = ref_dict[gene_symbol] if '8mer|0' in seed_match_list: print(line,'8mer|0', sep="\t",end="\n",file=output_file) elif '8mer|1' in seed_match_list: print(line,'8mer|1', sep="\t",end="\n",file=output_file) elif '8mer|2' in seed_match_list: print(line,'8mer|2', sep="\t",end="\n",file=output_file) elif '7mer-m8|0' in seed_match_list: print(line,'7mer-m8|0', sep="\t",end="\n",file=output_file) elif '7mer-m1|0' in seed_match_list: print(line,'7mer-m1|0', sep="\t",end="\n",file=output_file) elif '7mer-m8|1' in seed_match_list: print(line,'7mer-m8|1', sep="\t",end="\n",file=output_file) elif '7mer-m1|1' in seed_match_list: print(line,'7mer-m1|1', sep="\t",end="\n",file=output_file) elif '6mer-m7|0' in seed_match_list: print(line,'6mer-m7|0', sep="\t",end="\n",file=output_file) elif '6mer-m8|0' in seed_match_list: print(line,'6mer-m8|0', sep="\t",end="\n",file=output_file) else: #pass print('ERROR: ' + gene_symbol) input_file.close()
Naoto-Imamachi/MIRAGE
scripts/module/preparation/CC2_MRE_type_with_GU_wobbles_vs_exp_miR-124.py
Python
mit
2,426
from django import template register = template.Library() @register.filter def underscore_to_space(value): return value.replace("_", " ").replace(" BE", "")
C4ptainCrunch/hackeragenda
events/templatetags/events_tags.py
Python
gpl-3.0
164
"""This is the root component for the Asphalt webnotifier tutorial.""" import logging from difflib import HtmlDiff from asphalt.core import CLIApplicationComponent from async_generator import aclosing from webnotifier.detector import ChangeDetectorComponent logger = logging.getLogger(__name__) class ApplicationComponent(CLIApplicationComponent): async def start(self, ctx): self.add_component('detector', ChangeDetectorComponent) self.add_component('mailer', backend='smtp') await super().start(ctx) async def run(self, ctx): diff = HtmlDiff() async with aclosing(ctx.detector.changed.stream_events()) as stream: async for event in stream: difference = diff.make_file(event.old_lines, event.new_lines, context=True) await ctx.mailer.create_and_deliver( subject='Change detected in %s' % event.source.url, html_body=difference) logger.info('Sent notification email')
asphalt-framework/asphalt
examples/tutorial2/webnotifier/app.py
Python
apache-2.0
1,002
# https://canvas.instructure.com/doc/api/assignments.html from datetime import datetime from canvas.core.courses import get_courses, get_courses_whitelisted, get_course_people, get_courses_by_account_id from canvas.core.io import write_xlsx_file, tada from canvas.core.assignments import get_assignments def assignments_turnitin_msonline_list(): terms = ['2017-1SP'] programs = ['NFNPO', 'NCMO'] synergis = True course_whitelist = get_courses_whitelisted([]) header = ['term', 'program', 'SIS ID', 'course name', 'assignment name', 'assignment URL', 'due date', 'points', 'group assignment', 'faculty of record'] rows = [] for course in course_whitelist or get_courses(terms, programs, synergis): course_id = course['id'] if not get_course_people(course_id, 'student'): continue course_sis_id = course['sis_course_id'] program = course['course_sis_info']['program'] for assignment in get_assignments(course_id): if 'external_tool' in assignment['submission_types']: row = [terms[0], program, course_sis_id, course['name'], assignment['name'], assignment['html_url'], assignment['due_at'][0:10] if assignment['due_at'] else '', assignment['points_possible'] if assignment['points_possible'] else '', 'X' if 'group_category_id' in assignment and assignment['group_category_id'] else '', ', '.join([p['name'] for p in get_course_people(course_id, 'Faculty of record')])] rows.append(row) print(row) write_xlsx_file('turnitin_assignments_spring_{}' .format(datetime.now().strftime('%Y.%m.%d.%H.%M.%S')), header, rows) def assignments_turnitin_msonline_list_dev(): accounts = {'DEV FNPO': '168920', 'DEV CMO': '168922'} header = ['program', 'course name', 'assignment name', 'assignment URL', 'points'] rows = [] for account in accounts: for course in get_courses_by_account_id(accounts[account], 'DEFAULT'): course_id = course['id'] for assignment in get_assignments(course_id): if 'external_tool' in assignment['submission_types']: row = [ account, course['name'], assignment['name'], assignment['html_url'], assignment['points_possible'] if assignment['points_possible'] else ''] rows.append(row) print(row) write_xlsx_file('turnitin_assignments_spring_dev_{}' .format(datetime.now().strftime('%Y.%m.%d.%H.%M.%S')), header, rows) if __name__ == '__main__': # assignments_turnitin_msonline_list() assignments_turnitin_msonline_list_dev() tada()
dgrobani/py3-canvaslms-api
assignments/assignments_turnitin_msonline_list.py
Python
mit
3,100
''' Example that creates a planar silicon sensor with a given geometry. Calculates the electrical potential and fields. For comparison, the analytical result for a planar sensor with 100% fill factor (width = pitch) is also created. .. WARNING:: The calculation of the depletion region is simplified. If the depletion is not at a constant y position in the sensor (e.g. for pixels with very small fill factor), it deviates from the correct solution. ''' import numpy as np import matplotlib.pyplot as plt from scarce import fields, plot, silicon, sensor def sensor_planar(): # Sensor parameters n_eff = 6.2e12 n_pixel = 9 width = 50. pitch = 30. thickness = 250. smoothing = 0.05 resolution = 287 temperature = 300. V_bias = -300. V_readout = 0. # Create sensor pot_descr = sensor.planar_sensor(n_eff=n_eff, V_bias=V_bias, V_readout=V_readout, temperature=temperature, n_pixel=n_pixel, width=width, pitch=pitch, thickness=thickness, # Calculate drift potential only # to save time selection='drift', resolution=resolution, # Might have to be adjusted when changing # the geometry smoothing=smoothing ) # Build in voltage needed for analytical solution V_bi = -silicon.get_diffusion_potential(n_eff, temperature) # Plot analytical / numerical result with depletion region in 1D y = np.linspace(0, thickness, 1000) x = np.zeros_like(y) plt.plot(y, pot_descr.get_potential(x, y), label='Potential, numerical', linewidth=2) pot_masked = np.ma.masked_array(pot_descr.get_potential(x, y), mask=pot_descr.get_depl_mask(x, y)) plt.plot(y, pot_masked, label='Potential, numerical, depl.', linewidth=2) plt.plot([pot_descr.get_depletion(x[500]), pot_descr.get_depletion(x[500])], plt.ylim(), label='Depletion, numerical ', linewidth=2) plt.plot(y, fields.get_potential_planar_analytic_1D(y, V_bias=V_bias + V_bi, V_readout=V_readout, n_eff=n_eff, D=thickness), '--', label='Potential, analytical', linewidth=2) plt.plot([silicon.get_depletion_depth(np.abs(V_bias), n_eff / 1e12, temperature), silicon.get_depletion_depth(np.abs(V_bias), n_eff / 1e12, temperature)], plt.ylim(), '--', label='Depletion, analytical', linewidth=2) plt.ylabel('Potential [V]') plt.legend(loc=1) plt.ylabel('Potential [V]') ax2 = plt.gca().twinx() ax2.plot(y, pot_descr.get_field(x, y)[1], '--', label='Field, numerical', linewidth=2) ax2.plot(y, fields.get_electric_field_analytic(x, y, V_bias=V_bias, V_readout=V_readout, n_eff=n_eff, D=thickness)[1], '--', label='Field, analytical', linewidth=2) plt.ylabel('Field [V/cm]') plt.legend(loc=4) plt.ylabel('Field [V/cm]') plt.title('Potential in a not fully depleted planar sensor') plt.xlabel('Position [um]') plt.grid() plt.show() # Plot numerical result in 2D plot.plot_planar_sensor(width=width, pitch=pitch, thickness=thickness, n_pixel=n_pixel, # Weighting field = 0 at backplane V_backplane=V_bias, # Weighting field = 1 at readout V_readout=V_readout, pot_func=pot_descr.get_potential, field_func=pot_descr.get_field, depl_func=pot_descr.get_depletion, # Comment in if you want to see the mesh mesh=None, # potential.mesh, title='Planar sensor potential') if __name__ == '__main__': import logging logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S") sensor_planar()
SiLab-Bonn/Scarce
scarce/examples/sensor_planar.py
Python
mit
5,156
#!/usr/bin/python import numpy as np from skimage import measure class CPM: def __init__(self, L,cells=[1],V0=None,th=1): #Create Potts model self.L=L #lattice length/width self.size=L**2 #model size self.cells=cells self.c=1+np.sum(self.cells) self.q=1+len(self.cells) #number of states self.J=np.zeros((self.q,self.q)) #couplings self.th = th # volume constraint self.type = [0] i=0 for c in self.cells: i+=1 for rep in range(c): self.type+=[i] if V0 is None: self.V0=int(np.round(L*L/(self.c-1)*0.5)) else: self.V0=V0 self.randomize_couplings() self.initialize_state() self.set_temp() def set_temp(self,T=1): self.T=1.0 self.beta=1.0/self.T def set_beta(self,B): self.beta=B if B==0: self.B=None else: self.T=1/B def randomize_couplings(self): for i in range(self.q): for j in range(i,self.q): self.J[i,j]=np.random.rand() if not i==j: self.J[j,i]=self.J[i,j] self.J[0,0]=0 def initialize_state(self): # self.s = np.random.randint(0,self.c,(self.L,self.L)) self.s=np.zeros((self.L,self.L),int) #add starting cells in random positions for ind in range(1,self.c): i,j = np.random.randint(1,self.L-1,2) # row,column of cell, borders are forbidden self.s[i,j]=ind self.VE = self.volume_energy(self.s) def volume_energy(self,s): unique, counts = np.unique(s, return_counts=True) V=counts[1:] return self.th/(2*self.V0)*np.sum((V-self.V0)**2) #old code computing areas of connected blobs # cells= measure.label(s,connectivity=1) # props=measure.regionprops(cells) # print('areas',[prop.filled_area for prop in props]) # return np.sum([(prop.filled_area-self.V0)**2 for prop in props]) def get_neighbors(self,i,j): # Get the array of Von Newmann neighbors of a cell nn = [] nn+=[self.s[(i+1)%self.L,j]] #up nn+=[self.s[(i-1)%self.L,j]] #down nn+=[self.s[i,(j-1)%self.L]] #left nn+=[self.s[i,(j+1)%self.L]] #right return np.array(nn) def get_moore_neighbors(self,i,j): # Get the array of Moore neighbors of a cell nn = [] nn+=[self.s[(i+1)%self.L,(j-1)%self.L]] #up-left nn+=[self.s[(i+1)%self.L,j]] #up nn+=[self.s[(i+1)%self.L,(j+1)%self.L]] #up-right nn+=[self.s[i,(j+1)%self.L]] #right nn+=[self.s[(i-1)%self.L,(j+1)%self.L]] #down-right nn+=[self.s[(i-1)%self.L,j]] #down nn+=[self.s[(i-1)%self.L,(j-1)%self.L]] #down-left nn+=[self.s[i,(j-1)%self.L]] #left return np.array(nn) def is_locally_connected(self,cell_domain): # Detect local connectivity # cell_domain should be 1 if cell is present and 0 otherwise transitions=0 is_connected=False if np.sum(cell_domain)>0: for i in range(8): if cell_domain[i]<cell_domain[(i+1)%8]: # +1 if transition from 0 to 1 transitions+=1 if transitions<=1: is_connected=True return is_connected def MetropolisStep(self,mode='CA'): # Execute step of Metropolis algorithm #Select candidate and target nodes if mode=='MMA': i,j = np.random.randint(1,self.L-1,2) # row,column of cell, borders are forbidden nn=self.get_neighbors(i,j) # array of cell neighbors sijnew = nn[np.random.randint(len(nn))] # target state cond = sijnew!=self.s[i,j] if mode=='CA': i,j = np.random.randint(1,self.L-1,2) # row,column of cell nn=self.get_neighbors(i,j) # array of cell neighbors nn_unique=np.unique(nn) sijnew = nn_unique[np.random.randint(len(nn_unique))] # target state domain=self.get_moore_neighbors(i,j) lc_candidate=self.is_locally_connected(domain==self.s[i,j]) lc_target=self.is_locally_connected(domain==sijnew) cond = lc_candidate and lc_target and sijnew!=self.s[i,j] #Evaluate acceptance of change if cond: eDiff = 0 coupling_neighbors=self.get_neighbors(i,j) #Compute adhesion energy difference for sn in 
coupling_neighbors: eDiff+= self.J[self.type[sijnew],self.type[sn]]*int(sijnew!=sn) - self.J[self.type[self.s[i,j]],self.type[sn]]*int(self.s[i,j]!=sn) #Compute volume energy difference snew=self.s.copy() snew[i,j] = sijnew VEnew = self.volume_energy(snew) eDiff += VEnew - self.VE if eDiff <= 0 or np.log(np.random.rand()) < -self.beta*eDiff: # Metropolis self.s[i,j] = sijnew self.VE = VEnew def Energy(self): E=self.volume_energy(self.s) for i in range(self.L): for j in range(self.L): if self.s[i,j]>0: coupling_neighbors=self.get_neighbors(i,j) #Compute adhesion energy difference for sn in coupling_neighbors: if sn<self.s[i,j]: #we compute each link just once E+= self.J[self.type[self.s[i,j]],self.type[sn]]*int(self.s[i,j]!=sn) return(E)
MiguelAguilera/CellularPottsModel
CPM.py
Python
gpl-3.0
4,709
"""This module contains functions pertaining to numbers in Punjabi.""" from cltk.corpus.punjabi.alphabet import DIGITS_GURMUKHI as DIGITS __author__ = ['Nimit Bhardwaj <[email protected]>'] __license__ = 'MIT License. See LICENSE.' def punToEnglish_number(number): """Thee punToEnglish_number function will take a string num which is the number written in punjabi, like ੧੨੩, this is 123, the function will convert it to 123 and return the output as 123 of type int """ output = 0 #This is a simple logic, here we go to each digit and check the number and compare its index with DIGITS list in alphabet.py for num in number: output = 10 * output + DIGITS.index(num) return output def englishToPun_number(number): """This function converts the normal english number to the punjabi number with punjabi digits, its input will be an integer of type int, and output will be a string. """ output = '' number = list(str(number)) for digit in number: output += DIGITS[int(digit)] return output
LBenzahia/cltk
cltk/corpus/punjabi/numerifier.py
Python
mit
1,080
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2011 Juan Grande # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import socket import re import os import imp import scripts import shelve HOST = 'irc.freenode.net' PORT = 8001 CHANNEL = '#plugtree' class IrcUser: def __init__(self, nick='', user='', host=''): self._nick = nick self._user = user self._host = host def __str__(self): return 'IrcUser(nick=%s, user=%s, host=%s)' % ( self._nick, self._user, self._host ) def get_nick(self): return self._nick def get_user(self): return self._user def get_host(self): return self._host class IrcCommand: _cmd_pat = re.compile(r'^(:[^ ]+ +)?([a-zA-Z]+|\d\d\d)(.*)$') _params_pat = re.compile(r'^(( +[^: ][^ ]*)*)( +:(.*))?$') _user_pat = re.compile(r'^:([^! ]+)(!([^@ ]+)(@([^ ]+))?)? *$') def __init__(self, cmd_str): self._user = None self._cmd_name = None self._params = [] self._parse_cmd_str(cmd_str) def __str__(self): return 'IrcCommand(user=%s,cmd_name=%s,params=%s)' % ( self._user, self._cmd_name, self._params ) def get_param(self, idx): return self._params[idx] def get_cmd_name(self): return self._cmd_name def get_user(self): return self._user def _parse_cmd_str(self, cmd_str): m = IrcCommand._cmd_pat.match(cmd_str) if m == None: return None self._parse_user(m.group(1)) self._cmd_name = m.group(2) self._parse_params(m.group(3)) def _parse_params(self, params_str): if params_str != None: m = IrcCommand._params_pat.match(params_str) if m != None: # TOFIX maybe this is splitting not only on spaces, but on tabs and so if m.group(1) != None: self._params = m.group(1).split() if m.group(4) != None: self._params.append(m.group(4)) def _parse_user(self, user_str): if user_str != None: m = IrcCommand._user_pat.match(user_str) if m != None: self._user = IrcUser(nick=m.group(1), user=m.group(3), host=m.group(5)) class IrcCommandSource: def next(self): return IrcCommand() class FileIrcCommandSource(IrcCommandSource): def __init__(self, filename): f = open(filename, 'r') self._lines = f.readlines() f.close() def next(self): if len(self._lines)>0: return IrcCommand(self._lines.pop(0)) else: return None class SocketIrcCommandSource(IrcCommandSource): def __init__(self, socket): self._socket = socket self._data = '' self._lines = [] def next(self): while len(self._lines)==0: self._data = self._data + self._socket.recv(1024) lines = self._data.split('\n') self._data = lines.pop() self._lines.extend(lines) return IrcCommand(self._lines.pop(0).rstrip()) class ScriptsLoader: def __init__(self, pkg): self.scripts = [] self.pkg = pkg def load_scripts(self, nick): # TODO all scripts should be called <anything>_script.py filenames = [ fn for fn in os.listdir(self.pkg.__path__[0]) if not fn.startswith('__') and fn.endswith('.py') ] for fn in filenames: t = imp.find_module(fn[:-3], self.pkg.__path__) try: module = imp.load_module(fn[:-3], t[0], t[1], t[2]) script = module.init(nick) self.scripts.append(script) print 'Script %s loaded' % fn except ImportError as err: print 'Error loading script %s: %s' % (fn, err) finally: t[0].close() 
def clear_scripts(self): for m in self.scripts: del m self.scripts = [] def __iter__(self): return self.scripts.__iter__() class P3Bot: def __init__(self, realname, nick, user, host=None, sock=None, shelf=None): self._nick = nick self._user = user self._host = host self._realname = realname self._s = sock self._channel = None self._mynamepat = re.compile('(.*[, ])?%s([, ].*)?' % self._nick.lower()) self._shelf = shelf if self._shelf == None: self._shelf = shelve.open('bot-data') if self._s == None: self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self, host, port): self._s.connect((HOST, PORT)) print 'Connected, registering connection...' self.send_cmd('NICK', self._nick) self.send_cmd('USER', ( self._user, 'a', 'a', self._realname ) ) print 'Connection successfully registered' def close(self): if self._s != None: self._s.close() if self._shelf != None: self._shelf.close() def join(self, channel): if self._channel == None: self.send_cmd('JOIN', channel) self._channel = channel else: print 'Only one channel accepted, currently connected to %s' % self._channel def pong(self, server): self.send_cmd('PONG', server) def send_msg(self, msg, target=None): if target==None: target = self._channel self.send_cmd('PRIVMSG', ( target, msg )) def send_cmd(self, cmd, params): source = '' if self._nick != None: source = self._nick if self._user != None: source = '%s!%s' % (source, self._user) if self._host != None: source = '%s@%s' % (source, self._host) source = ':%s ' % source if isinstance(params, str): cmd = '%s %s %s\r\n' % (source, cmd, params) else: cmd = '%s %s %s :%s\r\n' % (source, cmd, ' '.join(params[:-1]), params[-1]) print 'sending command: %s' % cmd self._s.sendall(cmd) def get_cmd_src(self): return SocketIrcCommandSource(self._s) def get_shelf(self): return self._shelf def get_nick(self): return self._nick def set_data(self, script, key, value): key = '%s/%s' % (script, key) if value != None: self._shelf[key] = value else: try: del self._shelf[key] except KeyError: pass def get_data(self, script, key): key = '%s/%s' % (script, key) try: return self._shelf[key] except KeyError: return None def run(self): loader = ScriptsLoader(scripts) loader.load_scripts(self) self.connect(HOST, PORT) self.join(CHANNEL) try: src = self.get_cmd_src() while True: cmd = src.next() if cmd.get_cmd_name() == 'PING': self.pong(cmd.get_param(0)) elif cmd.get_cmd_name() == 'PRIVMSG': msg = cmd.get_param(1) if self._mynamepat.match(msg.lower()) != None: for script in loader: resp = script.execute(cmd.get_user().get_nick(), msg) if resp!=None: if cmd.get_param(0)==self._nick: target = cmd.get_user().get_nick() else: target = self._channel self.send_msg(resp, target) finally: print 'Closing connection' self.close()
jgrande/p3bot
src/main/python/p3bot.py
Python
apache-2.0
7,406
import core.modules from core.modules.vistrails_module import Module, ModuleError import numpy from Array import * class ArrayAccess(object): my_namespace = "numpy|array|access" class GetShape(Module, ArrayAccess): """ Get the size of each dimension of an N-dimensional array""" def compute(self): a = self.get_input("Array") sh = a.get_shape() dims = len(sh) for i in xrange(dims): pname = "dim" + str(i) self.set_output(pname, sh[i]) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Array Shape", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "dim0", (basic.Integer, 'Dim 0 Size')) reg.add_output_port(cls, "dim1", (basic.Integer, 'Dim 1 Size')) reg.add_output_port(cls, "dim2", (basic.Integer, 'Dim 2 Size')) reg.add_output_port(cls, "dim3", (basic.Integer, 'Dim 3 Size'), True) reg.add_output_port(cls, "dim4", (basic.Integer, 'Dim 4 Size'), True) reg.add_output_port(cls, "dim5", (basic.Integer, 'Dim 5 Size'), True) reg.add_output_port(cls, "dim6", (basic.Integer, 'Dim 6 Size'), True) reg.add_output_port(cls, "dim7", (basic.Integer, 'Dim 7 Size'), True) class GetReals(Module, ArrayAccess): """ Get the real component of a complex array """ def compute(self): a = self.get_input("Array") b = a.get_reals() out = NDArray() out.set_array(b) self.set_output("Real Component", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Reals", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Real Component", (NDArray, 'Real Components')) class GetImaginaries(Module, ArrayAccess): """ Get the imaginary component of a complex array """ def compute(self): a = self.get_input("Array") b = a.get_imaginary() out = NDArray() out.set_array(b) self.set_output("Im Component", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Imaginaries", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Im Component", (NDArray, 'Imaginary Components')) class GetMax(Module, ArrayAccess): """ Get the maximal value from an array """ def compute(self): a = self.get_input("Array") self.set_output("Max", float(a.get_max())) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Max", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Max", (basic.Float, 'Max Value')) class GetMean(Module, ArrayAccess): """ Get the mean value of an array """ def compute(self): a = self.get_input("Array") axis = self.force_get_input("Axis") out = NDArray() out.set_array(numpy.array(a.get_mean(axis))) self.set_output("Mean", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Mean", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_input_port(cls, "Axis", (basic.Integer, 'Axis'), True) reg.add_output_port(cls, "Mean", (NDArray, 'Mean Value')) class GetMin(Module, ArrayAccess): """ Get the smallest value in an array """ def compute(self): a = self.get_input("Array") self.set_output("Min", float(a.get_min())) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Min", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Min", (basic.Float, 'Min Value')) class GetDiagonal(Module, ArrayAccess): """ Get an array representing the values on the diagonal of the input array """ def 
compute(self): a = self.get_input("Array") out = NDArray() out.set_array(a.get_diagonal()) self.set_output("Diagonal", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Diagonal", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Diagonal", (NDArray, 'Diagonal Elements')) class GetArrayAsType(Module, ArrayAccess): """ Cast the array to the given type """ def compute(self): a = self.get_input("Array") t = self.get_input("Type") t.setValue("0") out = NDArray() out.set_array(a.get_array_as_type(type(t.value))) self.set_output("Output", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Cast Array", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_input_port(cls, "Type", (basic.Constant, 'Cast To Type')) reg.add_output_port(cls, "Output", (NDArray, 'Output Array')) class GetConjugate(Module, ArrayAccess): """ Get the complex conjugate of the input array """ def compute(self): a = self.get_input("Array") out = NDArray() out.set_array(a.get_conjugate()) self.set_output("Conjugate", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Conjugate", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Conjugate", (NDArray, 'Complex Conjugate')) class GetFlattenedArray(Module, ArrayAccess): """ Get a flattened representation of the input array""" def compute(self): a = self.get_input("Array") out = NDArray() out.set_array(a.get_flattened()) self.set_output("Flat Array", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Flattened Array", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Flat Array", (NDArray, 'Output Vector')) class GetField(Module, ArrayAccess): """ Get a field from an array given the output datatype and offset into the array""" def compute(self): a = self.get_input("Array") dt = self.get_input("DType") dt.setValue("0") o = self.get_input("Offset") out = NDArray() out.set_array(a.get_field(type(dt.value), o)) self.set_output("Field", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Field", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_input_port(cls, "DType", (basic.Constant, 'Output Type')) reg.add_input_port(cls, "Offset", (basic.Integer, 'Offset')) reg.add_output_port(cls, "Field", (NDArray, 'Output Field')) class ToScalar(Module, ArrayAccess): """ Return an array of size 1 to a scalar """ def compute(self): a = self.get_input("Array") self.set_output("Item", float(a.get_item())) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="To Scalar", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Item", (basic.Float, 'Item')) class GetMemoryFootprint(Module, ArrayAccess): """ Return the amount of system memory consumed by the array """ def compute(self): a = self.get_input("Array") self.set_output("Size", int(a.get_mem_size())) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Memory Size", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'InputArray')) reg.add_output_port(cls, "Size", (basic.Integer, 'Memory Size')) class GetArrayRank(Module, ArrayAccess): """ Get the rank of the array """ def compute(self): a = self.get_input("Array") 
self.set_output("Rank", int(a.get_num_dims())) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Array Rank", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Rank", (basic.Integer, 'Array Rank')) class GetNonZeroEntries(Module, ArrayAccess): """ Get an array consisting of the indices to all non-zero entries of the input array.""" def compute(self): a = self.get_input("Array") out = NDArray() out.set_array(a.get_nonzero_indices()) self.set_output("Entries", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Non-Zero Entries", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Entreis", (NDArray, 'Output Array')) class GetArraySize(Module, ArrayAccess): """ Get the number of entries in an array """ def compute(self): a = self.get_input("Array") self.set_output("Size", a.get_num_elements()) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Array Size", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Size", (basic.Integer, 'Number of Elements')) class GetTranspose(Module, ArrayAccess): """ Get the transpose of the array """ def compute(self): a = self.get_input("Array") out = NDArray() out.set_array(a.get_transpose()) self.set_output("Transpose", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Get Transpose", namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_output_port(cls, "Transpose", (NDArray, 'Transposed Array')) class GetRowRange(Module, ArrayAccess): """ Get a set of rows from the input array """ def compute(self): a = self.get_input("Array") s = self.get_input("Start") e = self.get_input("End") out = NDArray() if self.force_get_input("One Indexed"): s = s-1 e = e-1 out.set_array(a.get_row_range(s, e)) new_index = 0 for i in range(s,e+1): out.set_row_name(a.get_name(i), new_index) new_index += 1 out.set_domain_name(a.get_domain_name()) out.set_range_name(a.get_range_name()) self.set_output("Output Array", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_input_port(cls, "Start", (basic.Integer, 'Start Index')) reg.add_input_port(cls, "End", (basic.Integer, 'End Index')) reg.add_input_port(cls, "One Indexed", (basic.Boolean, 'One Indexed'), True) reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array')) class GetRows(Module, ArrayAccess): """ Get a set of rows from the input array defined by a list of indexes """ def compute(self): l = self.force_get_input("Index List") if l == None: l = self.force_get_input_list("Indexes") if l == None or len(l) == 0: raise ModuleError("No indexes provided") l.sort() inp = self.get_input("Array") in_ar = inp.get_array() out_ar = in_ar[l[0],::] for i in range(1,len(l)): out_ar = numpy.vstack((out_ar,in_ar[i,::])) out = NDArray() for i in range(out_ar.shape[0]): out.set_row_name(inp.get_row_name(l[i]), i) out.set_array(out_ar) out_order = NDArray() out_order.set_array(numpy.array(l)) self.set_output("Output Array", out) self.set_output("Output Order", out_order) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_input_port(cls, "Indexes", (basic.Integer, 'Index')) reg.add_input_port(cls, "Index 
List", (basic.List, 'Index List')) reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array')) reg.add_output_port(cls, "Output Order", (NDArray, 'Output Ordering')) class GetColumnRange(Module, ArrayAccess): """ Get a set of columns from the input array """ def compute(self): a = self.get_input("Array") s = self.get_input("Start") e = self.get_input("End") out = NDArray() out.set_array(a.get_col_range(s, e-1)) self.set_output("Output Array", out) @classmethod def register(cls, reg, basic): reg.add_module(cls, namespace=cls.my_namespace) reg.add_input_port(cls, "Array", (NDArray, 'Input Array')) reg.add_input_port(cls, "Start", (basic.Integer, 'Start Index')) reg.add_input_port(cls, "End", (basic.Integer, 'End Index')) reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
Nikea/VisTrails
contrib/NumSciPy/ArrayAccess.py
Python
bsd-3-clause
13,375
""" This scripts converts SER, DM3, and DM4 files into PNG """ import argparse from ncempy.io.dm import fileDM from ncempy.io.ser import fileSER import ntpath import os from matplotlib import cm from matplotlib.image import imsave def _discover_emi(file_route): file_name = ntpath.basename(file_route) folder = os.path.dirname(file_route) parts = file_name.split("_") if len(parts)==1: # No "_" in the filename. return None emi_file_name = "{}.emi".format("_".join(parts[:-1])) emi_file_route = os.path.join(folder, emi_file_name) if not os.path.isfile(emi_file_route): # file does not exist return None return emi_file_route def extract_dimension(img, fixed_dimensions=None): out_img=img dimension=len(img.shape) if fixed_dimensions is None: if dimension == 3: fixed_dimensions=[int(img.shape[0]/2), '',''] elif dimension == 4: fixed_dimensions=['', int(img.shape[2]/2), int(img.shape[3]/2)] elif dimension > 4: raise ValueError("This scripts cannot extract PNGs from DM files" " with" " more than four dimensions without explicit" " use of --fixed_dimensions.") if len(fixed_dimensions)!=dimension: raise ValueError("Number of values to index the image ({}) do not" " match the image dimension ({}).".format( len(fixed_dimensions), dimension)) d_tuple=() selected_dimensions=0 print("{}D Selecting frame ({})".format(dimension, fixed_dimensions)) for (d, s) in zip(fixed_dimensions, img.shape): if not d: d_tuple+=(slice(None, None, None),) elif d=="m": d_tuple+=(int(s/2),) selected_dimensions+=1 elif int(d)<s: selected_dimensions+=1 d_tuple+=(int(d),) if dimension-selected_dimensions!=2: raise ValueError("Dimension extractor do not fix enough components" " to produce a 2D image. Needs to fix: {}" "".format(dimension-2)) out_img = img[d_tuple] return out_img def dm_to_png(source_file, dest_file, fixed_dimensions=None): """ Saves the DM3 or DM4 source_file as PNG dest_file. If the data has three of four dimensions. The image taken is from the middle image in those dimensions.""" f = fileDM(source_file, on_memory=True) f.parseHeader() ds = f.getDataset(0) img = ds['data'] img = extract_dimension(img, fixed_dimensions) imsave(dest_file, img, format="png", cmap=cm.gray) return f def ser_to_png(source_file, dest_file): """ Saves the SER source_file as PNG dest_file.""" emi_file = _discover_emi(source_file) f = fileSER(source_file, emi_file) ds = f.getDataset(0) img = ds[0] imsave(dest_file, img, format="png") return f def main(): parser = argparse.ArgumentParser(description='Extracts a preview png from' ' a SER, DM3, or DM4 file.') parser.add_argument('source_files', metavar='source_files', type=str, nargs="+", help='Source files, must have ser, dm3, o dm4 extension.') parser.add_argument('--out_file', dest='dest_file', action='store', type=str, nargs=1, help='Filename to write the preview png file. If not set' ' it defaults to the name of the source file appending' ' png. Only valid when a single input file is ' ' processed.', default=None) parser.add_argument('--fixed_dimensions', dest='fixed_dimensions', action='store', type=str, nargs=1, help="List of numbers, 'm'(for middle), or nothing to" " extract" " a particular slice of data. e.g., '2,,' will extract" " an png with the values y,z of x=2. 
',m,,2' will " " extract all the values x,z for y=1/2shapeY, and w=2.", default=None) args = parser.parse_args() if args.dest_file is not None and len(args.source_files)>1: raise ValueError("--out_file only can be used when a single input file" " is processed.") fixed_dimensions = args.fixed_dimensions if fixed_dimensions is not None: fixed_dimensions=fixed_dimensions[0].split(',') for source_file in args.source_files: if args.dest_file is None: dest_file="{}.png".format(source_file) else: dest_file=args.dest_file[0] extension = source_file.split(".")[-1].lower() if not extension in ["dm3", "dm4", "ser"]: raise ValueError("Extension/filetype {} not supported!".format( extension)) print("Extracting from {}, saving image as {}".format(source_file, dest_file )) if extension in ["dm3","dm4"]: dm_to_png(source_file, dest_file, fixed_dimensions=fixed_dimensions) if extension in ["ser"]: ser_to_png(source_file, dest_file) if __name__ =="__main__": main()
ercius/openNCEM
ncempy/command_line/ncem2png.py
Python
gpl-3.0
5,664
# -*- encoding: utf-8 -*-
import controllers
import models
suhe/odoo
res-addons/website_sale_digital/__init__.py
Python
gpl-3.0
59
#!/usr/bin/env python """Convenience functions for writing LSST microservices""" import logging import os import sys import time import logging.handlers import requests import structlog from flask import Flask, jsonify, current_app # pylint: disable=redefined-builtin,too-many-arguments from past.builtins import basestring def set_flask_metadata(app, version, repository, description, api_version="1.0", name=None, auth=None, route=None): """ Sets metadata on the application to be returned via metadata routes. Parameters ---------- app : :class:`flask.Flask` instance Flask application for the microservice you're adding metadata to. version: `str` Version of your microservice. repository: `str` URL of the repository containing your microservice's source code. description: `str` Description of the microservice. api_version: `str`, optional Version of the SQuaRE service API framework. Defaults to '1.0'. name : `str`, optional Microservice name. Defaults to the Flask app name. If set, changes the Flask app name to match. auth : `dict`, `str`, or `None` The 'auth' parameter must be None, the empty string, the string 'none', or a dict containing a 'type' key, which must be 'none', 'basic', or 'bitly-proxy'. If the type is not 'none', there must also be a 'data' key containing a dict which holds authentication information appropriate to the authentication type. The legal non-dict 'auth' values are equivalent to a 'type' key of 'none'. route : `None`, `str`, or list of `str`, optional The 'route' parameter must be None, a string, or a list of strings. If supplied, each string will be prepended to the metadata route. Raises ------ TypeError If arguments are not of the appropriate type. ValueError If arguments are the right type but have illegal values. Returns ------- Nothing, but sets `app` metadata and decorates it with `/metadata` and `/v{app_version}/metadata` routes. """ errstr = set_flask_metadata.__doc__ if not isinstance(app, Flask): raise TypeError(errstr) if name is None: name = app.name app.config["NAME"] = name if app.name != name: app.name = name app.config["VERSION"] = version app.config["REPOSITORY"] = repository app.config["DESCRIPTION"] = description app.config["API_VERSION"] = api_version if not (isinstance(name, str) and isinstance(description, str) and isinstance(repository, str) and isinstance(version, str) and isinstance(api_version, str)): raise TypeError(errstr) if not (name and description and repository and version and api_version): raise ValueError(errstr) if auth is None or (isinstance(auth, str) and ((auth == "none") or (auth == ""))): auth = {"type": "none", "data": None} if not isinstance(auth, dict): raise TypeError(errstr) if "type" not in auth: raise ValueError(errstr) atp = auth["type"] if atp == "none": app.config["AUTH"] = {"type": "none", "data": None} else: if atp not in ["basic", "bitly-proxy"] or "data" not in auth: raise ValueError(errstr) app.config["AUTH"] = auth add_metadata_route(app, route) def add_metadata_route(app, route): """ Creates a /metadata route that returns service metadata. Also creates a /v{api_version}/metadata route, and those routes with ".json" appended. If route is specified, prepends it (or each component) to the front of the route. Parameters ---------- app : :class:`flask.Flask` instance Flask application for the microservice you're adding metadata to. route : `None`, `str`, or list of `str`, optional The 'route' parameter must be None, a string, or a list of strings. If supplied, each string will be prepended to the metadata route. 
Returns ------- Nothing, but decorates app with `/metadata` and `/v{app_version}/metadata` routes. """ errstr = add_metadata_route.__doc__ if route is None: route = [""] if isinstance(route, str): route = [route] if not isinstance(route, list): raise TypeError(errstr) if not all(isinstance(item, str) for item in route): raise TypeError(errstr) api_version = app.config["API_VERSION"] for rcomp in route: # Make canonical rcomp = "/" + rcomp.strip("/") if rcomp == "/": rcomp = "" for rbase in ["/metadata", "/v" + api_version + "/metadata"]: for rext in ["", ".json"]: rte = rcomp + rbase + rext with app.app_context(): app.add_url_rule(rte, '_return_metadata', _return_metadata) def _return_metadata(): """ Return JSON-formatted metadata for route attachment. Requires flask.current_app to be set, which means `with app.app_context()` """ app = current_app retdict = {"auth": app.config["AUTH"]["type"]} for fld in ["name", "repository", "version", "description", "api_version"]: retdict[fld] = app.config[fld.upper()] return jsonify(retdict) # pylint: disable = too-many-locals, too-many-arguments def retry_request(method, url, headers=None, payload=None, auth=None, tries=10, initial_interval=5, callback=None): """Retry an HTTP request with linear backoff. Returns the response if the status code is < 400 or waits (try * initial_interval) seconds and retries (up to tries times) if it is not. Parameters ---------- method: `str` Method: `GET`, `PUT`, or `POST` url: `str` URL of HTTP request headers: `dict` HTTP headers to supply. payload: `dict` Payload for request; passed as parameters to `GET`, JSON message body for `PUT`/`POST`. auth: `tuple` Authentication tuple for Basic/Digest/Custom HTTP Auth. tries: `int` Number of attempts to make. Defaults to `10`. initial_interval: `int` Interval between first and second try, and amount of time added before each successive attempt is made. Defaults to `5`. callback : callable A callable (function) object that is called each time a retry is needed. The callable has a keyword argument signature: - ``n``: number of tries completed (integer). - ``remaining``: number of tries remaining (integer). - ``status``: HTTP status of the previous call. - ``content``: body content of the previous call. Returns ------- :class:`requests.Response` The final HTTP Response received. Raises ------ :class:`apikit.BackendError` The `status_code` will be `500`, and the reason `Internal Server Error`. Its `content` will be diagnostic of the last response received. """ method = method.lower() attempt = 1 while True: if method == "get": resp = requests.get(url, headers=headers, params=payload, auth=auth) elif method == "put" or method == "post": resp = requests.put(url, headers=headers, json=payload, auth=auth) else: raise_ise("Bad method %s: must be 'get', 'put', or 'post" % method) if resp.status_code < 400: break delay = initial_interval * attempt if attempt >= tries: raise_ise("Failed to '%s' %s after %d attempts." % (method, url, tries) + " Last response was '%d %s' [%s]" % (resp.status_code, resp.reason, resp.text.strip())) if callback is not None: callback(n=attempt, remaining=tries - attempt, status=resp.status_code, content=resp.text.strip()) time.sleep(delay) attempt += 1 return resp def raise_ise(text): """Turn a failed request response into a BackendError that represents an Internal Server Error. Handy for reflecting HTTP errors from farther back in the call chain as failures of your service. Parameters ---------- text: `str` Error text. 
Raises ------ :class:`apikit.BackendError` The `status_code` will be `500`, and the reason `Internal Server Error`. Its `content` will be the text you passed. """ if isinstance(text, Exception): # Just in case we are exuberantly passed the entire Exception and # not its textual representation. text = str(text) raise BackendError(status_code=500, reason="Internal Server Error", content=text) def raise_from_response(resp): """Turn a failed request response into a BackendError. Handy for reflecting HTTP errors from farther back in the call chain. Parameters ---------- resp: :class:`requests.Response` Raises ------ :class:`apikit.BackendError` If `resp.status_code` is equal to or greater than 400. """ if resp.status_code < 400: # Request was successful. Or at least, not a failure. return raise BackendError(status_code=resp.status_code, reason=resp.reason, content=resp.text) def get_logger(file=None, syslog=False, loghost=None, level=None): """Creates a logging object compatible with Python standard logging, but which, as a `structlog` instance, emits JSON. Parameters ---------- file: `None` or `str` (default `None`) If given, send log output to file; otherwise, to `stdout`. syslog: `bool` (default `False`) If `True`, log to syslog. loghost: `None` or `str` (default `None`) If given, send syslog output to specified host, UDP port 514. level: `None` or `str` (default `None`) If given, and if one of (case-insensitive) `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL`, log events of that level or higher. Defaults to `WARNING`. Returns ------- :class:`structlog.Logger` A logging object """ if not syslog: if not file: handler = logging.StreamHandler(sys.stdout) else: handler = logging.FileHandler(file) else: if loghost: handler = logging.handlers.SysLogHandler(loghost, 514) else: handler = logging.handlers.SysLogHandler() root_logger = logging.getLogger() if level: level = level.upper() lldict = { 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL } if level in lldict: root_logger.setLevel(lldict[level]) root_logger.addHandler(handler) structlog.configure( processors=[ structlog.stdlib.filter_by_level, structlog.stdlib.add_logger_name, structlog.stdlib.add_log_level, structlog.stdlib.PositionalArgumentsFormatter(), structlog.processors.TimeStamper(fmt="iso"), structlog.processors.StackInfoRenderer(), structlog.processors.format_exc_info, structlog.processors.JSONRenderer() ], context_class=structlog.threadlocal.wrap_dict(dict), logger_factory=structlog.stdlib.LoggerFactory(), wrapper_class=structlog.stdlib.BoundLogger, cache_logger_on_first_use=True, ) log = structlog.get_logger() return log class APIFlask(Flask): """ Creates an APIFlask, which is a :class:`flask.Flask` instance subclass which already has a /metadata route that serves the correct data, as well as a /v{api_version}/metadata route, as well as those routes with ".json" appended. It is functionally equivalent to calling `apikit.set_flask_metadata` with a :class:`Flask.flask` instance as the first argument, except that using :class:`apikit.APIFlask` will (obviously) give you an object for which `isinstance(obj,apikit.APIFlask)` is true. It will also set the Flask config variables `DEBUG` (if the environment variable `DEBUG` is set and non-empty, the value will be `True`) and `LOGGER`, which will be set to the structlog instance created for this object. 
If the environment variable `LOGFILE` is set, the logger will send its logs to that file rather than standard output. If `LOGFILE` is not set and `LOG_TO_SYSLOG` is set, the logger will send its logs to syslog, and additionally if `LOGHOST` is also set, then the logger will send its logs to syslog on LOGHOST port 514 UDP. If `LOGLEVEL` is set (to one of the standard `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL`), logs of that severity or higher only will be recorded; otherwise the default loglevel is `WARNING`. The environment variable `DEBUG` implies `LOGLEVEL` will be treated as `DEBUG`. Parameters ---------- name: `str` Name of the microservice/Flask application. version: `str` Version of your microservice. repository: `str` URL of the repository containing your microservice's source code. description: `str` Description of the microservice. api_version: `str`, optional Version of the SQuaRE service API framework. Defaults to '1.0'. auth : `dict`, `str`, or `None` The 'auth' parameter must be None, the empty string, the string 'none', or a dict containing a 'type' key, which must be 'none', 'basic', or 'bitly-proxy'. If the type is not 'none', there must also be a 'data' key containing a dict which holds authentication information appropriate to the authentication type. The legal non-dict 'auth' values are equivalent to a 'type' key of 'none'. route : `None`, `str`, or list of `str`, optional The 'route' parameter must be None, a string, or a list of strings. If supplied, each string will be prepended to the metadata route. **kwargs Any other arguments to be passed to the :class:`flask.Flask` constructor. Raises ------ TypeError If arguments are not of the appropriate type. ValueError If arguments are the right type but have illegal values. Returns ------- :class:`apikit.APIFlask` instance. """ def __init__(self, name, version, repository, description, api_version="1.0", auth=None, route=None, **kwargs): """Initialize a new app""" if not isinstance(name, str): raise TypeError(APIFlask.__doc__) super(APIFlask, self).__init__(name, **kwargs) set_flask_metadata(self, description=description, repository=repository, version=version, api_version=api_version, auth=auth, route=route) logfile = None syslog = False loghost = None loglevel = None if "LOGFILE" in os.environ and os.environ["LOGFILE"]: logfile = os.environ["LOGFILE"] elif "LOG_TO_SYSLOG" in os.environ and os.environ["LOG_TO_SYSLOG"]: syslog = True if "LOGHOST" in os.environ and os.environ["LOGHOST"]: loghost = os.environ["LOGHOST"] if "LOGLEVEL" in os.environ and os.environ["LOGLEVEL"]: loglevel = os.environ["LOGLEVEL"] if "DEBUG" in os.environ and os.environ["DEBUG"]: self.debug = True self.config["DEBUG"] = True loglevel = "DEBUG" log = get_logger(file=logfile, syslog=syslog, loghost=loghost, level=loglevel) self.config["LOGGER"] = log def add_route_prefix(self, route): """Add a new route at the front of the metadata routes.""" add_metadata_route(self, route) class BackendError(Exception): """ Creates a JSON-formatted error for use in LSST/DM microservices. Parameters ---------- reason: `str` Reason for the exception status_code: `int`, optional Status code to be returned, defaults to 400. content: `str`, optional Textual content of the underlying error. Returns ------- :class:`apikit.BackendError` instance. 
This class will have the following fields: `reason`: `str` or `None` `status_code`: `int` `content`: `basestr` (Python3: `past.builtins.basestring`) or `None` Notes ----- This class is intended for use pretty much as described at (http://flask.pocoo.org/docs/0.11/patterns/apierrors/). """ reason = None status_code = 400 content = None def __init__(self, reason, status_code=None, content=None): """Exception for target service error.""" Exception.__init__(self) if not isinstance(reason, str): raise TypeError("'reason' must be a str") self.reason = reason if status_code is not None: if isinstance(status_code, int): self.status_code = status_code else: raise TypeError("'status_code' must be an int") if content is not None: if not isinstance(content, basestring): raise TypeError("'content' must be a basestring") self.content = content def __str__(self): """Useful textual representation""" return "BackendError: %d %s [%s]" % (self.status_code, self.reason, self.content) def to_dict(self): """Convenience method for creating custom error pages. Returns ------- `dict` : A dictionary with the following fields: `reason`: `str` or `None` `status_code`: `str` `error_content`: `str` or `None` The intention is to pass the resulting dict to `flask.jsonify()` to create a custom error response. """ return {"reason": self.reason, "status_code": self.status_code, "error_content": self.content}
lsst-sqre/sqre-apikit
apikit/convenience.py
Python
mit
18,708
import threading
import time
import json
import requests


class BiographySource(threading.Thread):
    def __init__(self, config, identifier):
        threading.Thread.__init__(self)
        self._config = config
        self._result = False
        self._identifier = identifier

    # Starting point for thread
    def run(self):
        title = self.get_name_from_deezer()
        if title:
            service_url = 'https://www.googleapis.com/freebase/v1/search'
            params = {
                'query': title,
                'key': self._config['config']['keys']['freebase'],
                'limit': 1,
                'output': '(description)'
            }
            r = requests.get(service_url, params=params)
            response = json.loads(r.text)
            self._result = self.get_biography(response)
        else:
            self._result = {}

    # Retrieve the name from deezer for searching the biography somewhere else
    def get_name_from_deezer(self):
        r = requests.get("http://api.deezer.com/artist/%s" % self._identifier)
        item = json.loads(r.text)
        if 'name' in item.keys():
            title = item['name']
        else:
            title = None
        return title

    # Retrieve the biography from the json result
    def get_biography(self, data):
        results = data['result'][0]
        output = results['output']
        description = output['description']
        if description:
            return description['/common/topic/description'][0]
        else:
            return False

    # Used in corresponding action to retrieve the results
    def retrieve_results(self):
        return self._result
HenriNijborg/MIS
MMR/mmr/action/sources/biographysource.py
Python
mit
1,725
# Copyright (C) 2016-2021 ycmd contributors # # This file is part of ycmd. # # ycmd is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ycmd is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ycmd. If not, see <http://www.gnu.org/licenses/>. from hamcrest import ( assert_that, contains_exactly, has_entry, has_entries, instance_of ) from unittest import TestCase from ycmd.tests.python import setUpModule # noqa from ycmd.tests.python import SharedYcmd from ycmd.tests.test_utils import BuildRequest class DebugInfoTest( TestCase ): @SharedYcmd def test_DebugInfo( self, app ): request_data = BuildRequest( filetype = 'python' ) assert_that( app.post_json( '/debug_info', request_data ).json, has_entry( 'completer', has_entries( { 'name': 'Python', 'items': contains_exactly( has_entries( { 'key': 'Python interpreter', 'value': instance_of( str ) } ), has_entries( { 'key': 'Python root', 'value': instance_of( str ) } ), has_entries( { 'key': 'Python path', 'value': instance_of( str ) } ), has_entries( { 'key': 'Python version', 'value': instance_of( str ) } ), has_entries( { 'key': 'Jedi version', 'value': instance_of( str ) } ), has_entries( { 'key': 'Parso version', 'value': instance_of( str ) } ) ) } ) ) )
puremourning/ycmd-1
ycmd/tests/python/debug_info_test.py
Python
gpl-3.0
2,083
import csv
import sys


def parse_csv(filename):
    f = open(filename, 'rb')
    reader = csv.reader(f)
    header = reader.next()
    exception_message = """Your csv header was named incorrectly, make sure
    there are no unnecessary commas or spaces and that they're in correct
    order."""
    if header != ['trials', 'probabilities']:
        raise Exception(exception_message)
    trials = []
    probabilities = []
    for row in reader:
        trials.append(int(row[0]))
        probabilities.append(float(row[1]))
    threshold = sum([a * (1-b) for a, b in zip(trials, probabilities)])
    return threshold, probabilities


def probabilities(array, probability_dict, sum_dict):
    cur_prev = [10, 11]
    i = 1
    while i < len(array):
        new_prev = []
        for j in xrange(0, len(cur_prev)):
            new_prev.append(add_probability(cur_prev[j], array[i],
                                            probability_dict, sum_dict))
        cur_prev = [k for j in range(len(new_prev)) for k in new_prev[j]]
        i += 1
    return probability_dict


def add_probability(prev, p, probability_dict, sum_dict):
    prev_prob = probability_dict[prev]
    prev_sum = sum_dict[prev]
    new_0 = prev * 10
    new_1 = prev * 10 + 1
    probability_dict[new_0] = prev_prob * (1-p)
    probability_dict[new_1] = prev_prob * p
    sum_dict[new_0] = prev_sum
    sum_dict[new_1] = prev_sum + (1 - p)
    return new_0, new_1


def sum_threshold(sum_dict, probability_dict, threshold):
    count = 0
    for key in sum_dict:
        if sum_dict[key] >= threshold:
            count += probability_dict[key]
    return count


def main():
    filename = sys.argv[1]
    threshold, array = parse_csv(filename)
    probability_dict = {10: 1 - array[0], 11: array[0]}
    sum_dict = {10: 0, 11: 1 - array[0]}
    probabilities(array, probability_dict, sum_dict)
    to_delete = []
    for key in probability_dict:
        if len(str(key)) < len(array) + 1:
            to_delete.append(key)
    for key in to_delete:
        del probability_dict[key]
        del sum_dict[key]
    # Should probably combine these in one dictionary
    print sum_threshold(sum_dict, probability_dict, threshold)


if __name__ == "__main__":
    main()
AndrewJudson/jackknife
script.py
Python
mit
2,319
""" Swaggy Jenkins Jenkins API clients generated from Swagger / Open API specification # noqa: E501 The version of the OpenAPI document: 1.1.2-pre.0 Contact: [email protected] Generated by: https://openapi-generator.tech """ import sys import unittest import swaggyjenkins from swaggyjenkins.model.link import Link globals()['Link'] = Link from swaggyjenkins.model.favorite_impllinks import FavoriteImpllinks class TestFavoriteImpllinks(unittest.TestCase): """FavoriteImpllinks unit test stubs""" def setUp(self): pass def tearDown(self): pass def testFavoriteImpllinks(self): """Test FavoriteImpllinks""" # FIXME: construct object with mandatory attributes with example values # model = FavoriteImpllinks() # noqa: E501 pass if __name__ == '__main__': unittest.main()
cliffano/swaggy-jenkins
clients/python/generated/test/test_favorite_impllinks.py
Python
mit
871
#!/usr/bin/env python2 import json import os from types import * import unittest from playlistingscraper.playlistingscraper import PlayListingScraper class PlayListingScrapwerTest(unittest.TestCase): def setUp(self): listing_parser = PlayListingScraper() self.package_name = "com.google.android.youtube" self.version_code = "5021" out_dir = os.getcwd() self.json_file = os.path.join(out_dir, self.package_name + '-' + self.version_code + '.listing.json') listing_parser.scrape_remote_page(self.package_name, self.version_code, out_dir) with open(self.json_file) as data_file: self.app = json.load(data_file) def tearDown(self): os.remove(self.json_file) def test_file_exists(self): self.assertTrue(os.path.exists(self.json_file)) def test_package_name(self): self.assertEqual(self.app["n"], self.package_name, "Invalid package name") def test_title(self): self.assertGreater(len(self.app["t"]), 0, "title is empty") # assert type(self.app["t"]) is unicode, "t is not a string: %r" \ # % self.app["t"] self.assertTrue("youtube" in self.app["t"].lower(), "Unknown app title") def test_description(self): self.assertGreater(len(self.app["desc"]), 0, "description is empty") def test_category(self): self.assertGreater(len(self.app["cat"]), 0, "category is empty") def test_price(self): self.assertGreater(len(self.app["pri"]), 0, "price is empty") self.assertEqual(self.app["pri"], "Free", "Expected Free but got " + self.app["pri"]) def test_date_published(self): self.assertGreater(len(self.app["dtp"]), 0, "date published is empty") def test_os_version(self): self.assertGreater(len(self.app["os"]), 0, "os version is empty") def test_rating_count(self): assert type( self.app["rct"]) is IntType, "rating count is not an integer: %r" \ % self.app["rct"] self.assertGreater(self.app["rct"], 0, "rating count is not greater than zero") def test_rating(self): print(self.app["rate"]) assert type(self.app[ "rate"]) is FloatType, "app rating is not a float number: %r" \ % self.app["rate"] self.assertGreater(self.app["rate"], 0, "app rating is not greater than zero") def test_content_rating(self): self.assertGreater(len(self.app["crat"]), 0, "content rating is empty") def test_creator(self): self.assertGreater(len(self.app["crt"]), 0, "creator is empty") def test_creator_address(self): self.assertGreater(len(self.app["cadd"]), 0, "creator address is empty") def test_creator_url(self): self.assertGreater(len(self.app["curl"]), 0, "creator url is empty") def test_install_size(self): self.assertGreater(len(self.app["sz"]), 0, "install size is empty") def test_download_count(self): assert type( self.app["dct"]) is IntType, "download count is not an integer: %r" \ % self.app["dct"] self.assertGreater(self.app["dct"], 0, "download count is not greater than zero") def test_download_count_text(self): self.assertGreater(len(self.app["dtxt"]), 0, "download count text is empty") def test_privacy_url(self): self.assertGreater(len(self.app["purl"]), 0, "privacy url is empty") self.assertTrue(self.app["purl"].startswith("http")) def test_whats_new(self): self.assertGreater(len(self.app["new"]), 0, "whats new field is empty") if __name__ == '__main__': unittest.main()
sikuli/sieveable-tools
Play-Listing-Scraper/tests/playlistingscraper_test.py
Python
mit
4,104
import requests


# config is an instance of ConfigParser
def upload(filename, config):
    return __upload(
        filename,
        config.get('Upload', 'ServerURI'),
        config.get('Upload', 'Location'),
        config.get('Default', 'CameraName')
    )


def __upload(filename, server_uri, location, camera_name):
    f = open(filename, 'rb')
    # https://requests.readthedocs.org/en/latest/user/quickstart/
    resp = requests.post(
        server_uri,
        files={'photo_file': f},
        data={
            'photo[location]': location,
            'camera_name': camera_name
        }
    )
    f.close()
    return resp


if __name__ == '__main__':
    import sys
    if len(sys.argv) < 5:
        print 'usage <filename> <server_uri> <location> <camera_name>'
    else:
        resp = __upload(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
        status_code = resp.status_code
        if status_code != 201:  # created
            print resp
        else:
            print resp.json()
xnnyygn/auto-camera
raspberry-pi/camera-agent/photo_uploader.py
Python
mit
916
#!/usr/bin/python
import os
import signal
import time
import sys


def init():
    # open sync pid file
    path = os.path.expanduser("~/.cache/zipfs/sync.pid")
    if os.path.exists(path) and os.stat(path).st_size > 0:
        f = open(path, "r+")
        # read the previous pid and check whether that process is still running
        is_running = True
        try:
            os.kill(int(f.readline()), 0)
        except OSError:
            is_running = False
        # close file
        f.close()
        # if another sync process is already running, we don't need to start one
        if is_running:
            sys.exit()
    # remake / make file to erase old contents
    f = open(path, "w+")
    # write our pid to file
    f.write(str(os.getpid()))
    # close file
    f.close()


def sync():
    # check rm log
    # upload new contents to server
    os.system("rsync --ignore-existing -r -a -v -e ssh ~/FileSystem/zipfs/o/dir/ [email protected]:/home/arvinsharma/test/")
    # download content from server
    os.system("rsync --ignore-existing -r -a -v -e ssh [email protected]:/home/arvinsharma/test/ ~/FileSystem/zipfs/o/dir/")
    # delete rm log'd files on server
    # PATH to rmlog in zip file directory will be passed in as a command line argument.
    path_to_log = sys.argv[1]
    lines = open(path_to_log, "r").readlines()
    for line in lines:
        path_to_idx = "/home/arvinsharma/test/" + line.strip()
        path_to_zip = "/home/arvinsharma/test/" + line.strip()[:len(line.strip()) - 4] + ".zip"
        os.system("ssh [email protected] 'rm " + path_to_idx + " && rm " + path_to_zip + "'")


# sync on SIGUSR1
def signal_handler(signum, frame):
    print('Received Signal ', signum)
    sync()


## Begin Execution ##
init()

## sync on sigusr1 ##
signal.signal(signal.SIGUSR1, signal_handler)

## sync on start, then resync every 10 seconds ##
## (a loop rather than tail recursion, so long runs don't hit the recursion limit)
while True:
    sync()
    time.sleep(10)
freester1/zipfs
sync.py
Python
gpl-3.0
1,925
from toontown.coghq.SpecImports import * GlobalEntities = {1000: {'type': 'levelMgr', 'name': 'LevelMgr', 'comment': '', 'parentEntId': 0, 'modelFilename': 'phase_10/models/cogHQ/EndVault.bam'}, 1001: {'type': 'editMgr', 'name': 'EditMgr', 'parentEntId': 0, 'insertEntity': None, 'removeEntity': None, 'requestNewEntity': None, 'requestSave': None}, 0: {'type': 'zone', 'name': 'UberZone', 'comment': '', 'parentEntId': 0, 'scale': 1, 'description': '', 'visibility': []}, 10001: {'type': 'cogdoCraneCogSettings', 'name': '<unnamed>', 'comment': '', 'parentEntId': 0, 'CogFlyAwayDuration': 4.0, 'CogFlyAwayHeight': 50.0, 'CogMachineInteractDuration': 2.0, 'CogSpawnPeriod': 10.0, 'CogWalkSpeed': 12.07161265369133}, 10000: {'type': 'cogdoCraneGameSettings', 'name': '<unnamed>', 'comment': '', 'parentEntId': 0, 'EmptyFrictionCoef': 0.1, 'GameDuration': 180.0, 'Gravity': -32, 'MagnetMass': 1.0, 'MoneyBagGrabHeight': -4.1, 'RopeLinkMass': 1.0}} Scenario0 = {} levelSpec = {'globalEntities': GlobalEntities, 'scenarios': [Scenario0]}
ToonTownInfiniteRepo/ToontownInfinite
toontown/cogdominium/CogdoCraneGameSpec.py
Python
mit
1,302
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. from django.http.response import HttpResponse, HttpResponseRedirect from django.utils.translation import ugettext_lazy as _ from django.views.generic.base import TemplateView from shuup.core import telemetry class TelemetryView(TemplateView): template_name = "shuup/admin/system/telemetry.jinja" def get_context_data(self, **kwargs): context = super(TelemetryView, self).get_context_data(**kwargs) context.update({ "opt_in": not telemetry.is_opt_out(), "is_grace": telemetry.is_in_grace_period(), "last_submission_time": telemetry.get_last_submission_time(), "submission_data": telemetry.get_telemetry_data(request=self.request, indent=2), "title": _("Telemetry") }) return context def get(self, request, *args, **kwargs): if "last" in request.GET: return HttpResponse(telemetry.get_last_submission_data(), content_type="text/plain; charset=UTF-8") return super(TelemetryView, self).get(request, *args, **kwargs) def post(self, request, *args, **kwargs): opt = request.POST.get("opt") if opt: telemetry.set_opt_out(opt == "out") return HttpResponseRedirect(request.path)
suutari/shoop
shuup/admin/modules/system/views/telemetry.py
Python
agpl-3.0
1,503
#!/usr/bin/python

"""
    Starter code for the validation mini-project.
    The first step toward building your POI identifier!

    Start by loading/formatting the data

    After that, it's not our code anymore--it's yours!
"""

import pickle
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit

data_dict = pickle.load(open("../final_project/final_project_dataset.pkl", "r"))

### first element is our labels, any added elements are predictor
### features. Keep this the same for the mini-project, but you'll
### have a different feature list when you do the final project.
features_list = ["poi", "salary"]

data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)

from sklearn.model_selection import train_test_split

features_training, features_test, labels_training, labels_test = train_test_split(
    features, labels, random_state=42, test_size=0.3)

### it's all yours from here forward!
from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier()
clf.fit(features_training, labels_training)
print(clf.score(features_test, labels_test))
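# An illustrative addition, not part of the Udacity starter code: the lesson behind
# this mini-project is that skipping validation hides overfitting. Training and
# scoring the tree on the full dataset yields a much higher, misleading accuracy
# (typically close to 1.0, since the tree memorises the data) than the held-out
# test score printed above.
clf_overfit = DecisionTreeClassifier()
clf_overfit.fit(features, labels)            # fit on all of the data
print(clf_overfit.score(features, labels))   # scored on the same data it was fit on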
yavuzovski/playground
machine learning/Udacity/ud120-projects/validation/validate_poi.py
Python
gpl-3.0
1,149
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import ForwardingRulesTransport
from .rest import ForwardingRulesRestTransport
from .rest import ForwardingRulesRestInterceptor


# Compile a registry of transports.
_transport_registry = OrderedDict()  # type: Dict[str, Type[ForwardingRulesTransport]]
_transport_registry["rest"] = ForwardingRulesRestTransport

__all__ = (
    "ForwardingRulesTransport",
    "ForwardingRulesRestTransport",
    "ForwardingRulesRestInterceptor",
)
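# A small sketch, not part of the generated module: callers typically resolve a
# transport class from a registry like the one above by its label and then
# instantiate it. The helper below is illustrative only; in the real client library
# the lookup lives in the generated client class, not in this file.
def _pick_transport(label="rest"):
    return _transport_registry[label]  # e.g. ForwardingRulesRestTransport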
googleapis/python-compute
google/cloud/compute_v1/services/forwarding_rules/transports/__init__.py
Python
apache-2.0
1,111
#! env/bin/python
import os

from flask import url_for
from flask_script import Manager, Shell, commands
from flask_migrate import Migrate, MigrateCommand

from app import create_app, db
from app.models import FlaskyConfigs, FlaskyArticles

app = create_app('default')
manager = Manager(app)
migrate = Migrate(app, db)


def make_shell_context():
    return dict(app=app, db=db)


manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('showurls', commands.ShowUrls)

'''
@app.context_processor
def override_processor():
    return dict(url_for=dated_url_for)


def dated_url_for(endpoint, **values):
    if endpoint == 'static':
        filename = values.get('filename', None)
        if filename:
            file_path = os.path.join(app.root_path, endpoint, filename)
            values['v'] = int(os.stat(file_path).st_mtime)
    return url_for(endpoint, **values)
'''

if __name__ == '__main__':
    manager.run()
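# A brief usage note, not part of the original manage.py: with Flask-Script, the
# commands registered above are invoked from the command line, for example:
#
#   python manage.py shell        # interactive shell with `app` and `db` preloaded
#   python manage.py db migrate   # Flask-Migrate commands exposed via MigrateCommand
#   python manage.py showurls     # list the application's registered URL rules
#   python manage.py runserver    # Manager's built-in development server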
PassionZale/My
manage.py
Python
mit
993
import errno import logging import multiprocessing import os import socket import string import subprocess import sys import time import unittest from waitress import server from waitress.compat import ( httplib, tobytes ) from waitress.utilities import cleanup_unix_socket dn = os.path.dirname here = dn(__file__) class NullHandler(logging.Handler): # pragma: no cover """A logging handler that swallows all emitted messages. """ def emit(self, record): pass def start_server(app, svr, queue, **kwargs): # pragma: no cover """Run a fixture application. """ logging.getLogger('waitress').addHandler(NullHandler()) svr(app, queue, **kwargs).run() class FixtureTcpWSGIServer(server.TcpWSGIServer): """A version of TcpWSGIServer that relays back what it's bound to. """ def __init__(self, application, queue, **kw): # pragma: no cover # Coverage doesn't see this as it's ran in a separate process. kw['port'] = 0 # Bind to any available port. super(FixtureTcpWSGIServer, self).__init__(application, **kw) queue.put(self.socket.getsockname()) class SubprocessTests(object): # For nose: all tests may be ran in separate processes. _multiprocess_can_split_ = True exe = sys.executable server = None def start_subprocess(self, target, **kw): # Spawn a server process. queue = multiprocessing.Queue() self.proc = multiprocessing.Process( target=start_server, args=(target, self.server, queue), kwargs=kw, ) self.proc.start() if self.proc.exitcode is not None: # pragma: no cover raise RuntimeError("%s didn't start" % str(target)) # Get the socket the server is listening on. self.bound_to = queue.get(timeout=5) self.sock = self.create_socket() def stop_subprocess(self): if self.proc.exitcode is None: self.proc.terminate() self.sock.close() def assertline(self, line, status, reason, version): v, s, r = (x.strip() for x in line.split(None, 2)) self.assertEqual(s, tobytes(status)) self.assertEqual(r, tobytes(reason)) self.assertEqual(v, tobytes(version)) def create_socket(self): return socket.socket(self.server.family, socket.SOCK_STREAM) def connect(self): self.sock.connect(self.bound_to) def make_http_connection(self): raise NotImplementedError # pragma: no cover def send_check_error(self, to_send): self.sock.send(to_send) class TcpTests(SubprocessTests): server = FixtureTcpWSGIServer def make_http_connection(self): return httplib.HTTPConnection(*self.bound_to) class SleepyThreadTests(TcpTests, unittest.TestCase): # test that sleepy thread doesnt block other requests def setUp(self): from waitress.tests.fixtureapps import sleepy self.start_subprocess(sleepy.app) def tearDown(self): self.stop_subprocess() def test_it(self): getline = os.path.join(here, 'fixtureapps', 'getline.py') cmds = ( [self.exe, getline, 'http://%s:%d/sleepy' % self.bound_to], [self.exe, getline, 'http://%s:%d/' % self.bound_to] ) r, w = os.pipe() procs = [] for cmd in cmds: procs.append(subprocess.Popen(cmd, stdout=w)) time.sleep(3) for proc in procs: if proc.returncode is not None: # pragma: no cover proc.terminate() # the notsleepy response should always be first returned (it sleeps # for 2 seconds, then returns; the notsleepy response should be # processed in the meantime) result = os.read(r, 10000) os.close(r) os.close(w) self.assertEqual(result, b'notsleepy returnedsleepy returned') class EchoTests(object): def setUp(self): from waitress.tests.fixtureapps import echo self.start_subprocess(echo.app) def tearDown(self): self.stop_subprocess() def test_date_and_server(self): to_send = ("GET / HTTP/1.0\n" "Content-Length: 0\n\n") to_send = 
tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(headers.get('server'), 'waitress') self.assertTrue(headers.get('date')) def test_bad_host_header(self): # http://corte.si/posts/code/pathod/pythonservers/index.html to_send = ("GET / HTTP/1.0\n" " Host: 0\n\n") to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '400', 'Bad Request', 'HTTP/1.0') self.assertEqual(headers.get('server'), 'waitress') self.assertTrue(headers.get('date')) def test_send_with_body(self): to_send = ("GET / HTTP/1.0\n" "Content-Length: 5\n\n") to_send += 'hello' to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(headers.get('content-length'), '5') self.assertEqual(response_body, b'hello') def test_send_empty_body(self): to_send = ("GET / HTTP/1.0\n" "Content-Length: 0\n\n") to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(headers.get('content-length'), '0') self.assertEqual(response_body, b'') def test_multiple_requests_with_body(self): for x in range(3): self.sock = self.create_socket() self.test_send_with_body() self.sock.close() def test_multiple_requests_without_body(self): for x in range(3): self.sock = self.create_socket() self.test_send_empty_body() self.sock.close() def test_without_crlf(self): data = "Echo\nthis\r\nplease" s = tobytes( "GET / HTTP/1.0\n" "Connection: close\n" "Content-Length: %d\n" "\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(int(headers['content-length']), len(data)) self.assertEqual(len(response_body), len(data)) self.assertEqual(response_body, tobytes(data)) def test_large_body(self): # 1024 characters. 
body = 'This string has 32 characters.\r\n' * 32 s = tobytes( "GET / HTTP/1.0\n" "Content-Length: %d\n" "\n" "%s" % (len(body), body) ) self.connect() self.sock.send(s) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(headers.get('content-length'), '1024') self.assertEqual(response_body, tobytes(body)) def test_many_clients(self): conns = [] for n in range(50): h = self.make_http_connection() h.request("GET", "/", headers={"Accept": "text/plain"}) conns.append(h) responses = [] for h in conns: response = h.getresponse() self.assertEqual(response.status, 200) responses.append(response) for response in responses: response.read() def test_chunking_request_without_content(self): header = tobytes( "GET / HTTP/1.1\n" "Transfer-Encoding: chunked\n\n" ) self.connect() self.sock.send(header) self.sock.send(b"0\r\n\r\n") fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') self.assertEqual(response_body, b'') def test_chunking_request_with_content(self): control_line = b"20;\r\n" # 20 hex = 32 dec s = b'This string has 32 characters.\r\n' expected = s * 12 header = tobytes( "GET / HTTP/1.1\n" "Transfer-Encoding: chunked\n\n" ) self.connect() self.sock.send(header) fp = self.sock.makefile('rb', 0) for n in range(12): self.sock.send(control_line) self.sock.send(s) self.sock.send(b"0\r\n\r\n") line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') self.assertEqual(response_body, expected) def test_broken_chunked_encoding(self): control_line = "20;\r\n" # 20 hex = 32 dec s = 'This string has 32 characters.\r\n' to_send = "GET / HTTP/1.1\nTransfer-Encoding: chunked\n\n" to_send += (control_line + s) # garbage in input to_send += "GET / HTTP/1.1\nTransfer-Encoding: chunked\n\n" to_send += (control_line + s) to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) # receiver caught garbage and turned it into a 400 self.assertline(line, '400', 'Bad Request', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) self.assertEqual(sorted(headers.keys()), ['content-length', 'content-type', 'date', 'server']) self.assertEqual(headers['content-type'], 'text/plain') # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_keepalive_http_10(self): # Handling of Keep-Alive within HTTP 1.0 data = "Default: Don't keep me alive" s = tobytes( "GET / HTTP/1.0\n" "Content-Length: %d\n" "\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) connection = response.getheader('Connection', '') # We sent no Connection: Keep-Alive header # Connection: close (or no header) is default. 
self.assertTrue(connection != 'Keep-Alive') def test_keepalive_http10_explicit(self): # If header Connection: Keep-Alive is explicitly sent, # we want to keept the connection open, we also need to return # the corresponding header data = "Keep me alive" s = tobytes( "GET / HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: %d\n" "\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) connection = response.getheader('Connection', '') self.assertEqual(connection, 'Keep-Alive') def test_keepalive_http_11(self): # Handling of Keep-Alive within HTTP 1.1 # All connections are kept alive, unless stated otherwise data = "Default: Keep me alive" s = tobytes( "GET / HTTP/1.1\n" "Content-Length: %d\n" "\n" "%s" % (len(data), data)) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) self.assertTrue(response.getheader('connection') != 'close') def test_keepalive_http11_explicit(self): # Explicitly set keep-alive data = "Default: Keep me alive" s = tobytes( "GET / HTTP/1.1\n" "Connection: keep-alive\n" "Content-Length: %d\n" "\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) self.assertTrue(response.getheader('connection') != 'close') def test_keepalive_http11_connclose(self): # specifying Connection: close explicitly data = "Don't keep me alive" s = tobytes( "GET / HTTP/1.1\n" "Connection: close\n" "Content-Length: %d\n" "\n" "%s" % (len(data), data) ) self.connect() self.sock.send(s) response = httplib.HTTPResponse(self.sock) response.begin() self.assertEqual(int(response.status), 200) self.assertEqual(response.getheader('connection'), 'close') class PipeliningTests(object): def setUp(self): from waitress.tests.fixtureapps import echo self.start_subprocess(echo.app) def tearDown(self): self.stop_subprocess() def test_pipelining(self): s = ("GET / HTTP/1.0\r\n" "Connection: %s\r\n" "Content-Length: %d\r\n" "\r\n" "%s") to_send = b'' count = 25 for n in range(count): body = "Response #%d\r\n" % (n + 1) if n + 1 < count: conn = 'keep-alive' else: conn = 'close' to_send += tobytes(s % (conn, len(body), body)) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) for n in range(count): expect_body = tobytes("Response #%d\r\n" % (n + 1)) line = fp.readline() # status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) length = int(headers.get('content-length')) or None response_body = fp.read(length) self.assertEqual(int(status), 200) self.assertEqual(length, len(response_body)) self.assertEqual(response_body, expect_body) class ExpectContinueTests(object): def setUp(self): from waitress.tests.fixtureapps import echo self.start_subprocess(echo.app) def tearDown(self): self.stop_subprocess() def test_expect_continue(self): # specifying Connection: close explicitly data = "I have expectations" to_send = tobytes( "GET / HTTP/1.1\n" "Connection: close\n" "Content-Length: %d\n" "Expect: 100-continue\n" "\n" "%s" % (len(data), data) ) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line = fp.readline() # continue status line version, status, reason = (x.strip() for x in line.split(None, 2)) self.assertEqual(int(status), 100) self.assertEqual(reason, b'Continue') self.assertEqual(version, b'HTTP/1.1') 
fp.readline() # blank line line = fp.readline() # next status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) length = int(headers.get('content-length')) or None response_body = fp.read(length) self.assertEqual(int(status), 200) self.assertEqual(length, len(response_body)) self.assertEqual(response_body, tobytes(data)) class BadContentLengthTests(object): def setUp(self): from waitress.tests.fixtureapps import badcl self.start_subprocess(badcl.app) def tearDown(self): self.stop_subprocess() def test_short_body(self): # check to see if server closes connection when body is too short # for cl header to_send = tobytes( "GET /short_body HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: 0\n" "\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line = fp.readline() # status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) content_length = int(headers.get('content-length')) response_body = fp.read(content_length) self.assertEqual(int(status), 200) self.assertNotEqual(content_length, len(response_body)) self.assertEqual(len(response_body), content_length - 1) self.assertEqual(response_body, tobytes('abcdefghi')) # remote closed connection (despite keepalive header); not sure why # first send succeeds self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_long_body(self): # check server doesnt close connection when body is too short # for cl header to_send = tobytes( "GET /long_body HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: 0\n" "\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line = fp.readline() # status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) content_length = int(headers.get('content-length')) or None response_body = fp.read(content_length) self.assertEqual(int(status), 200) self.assertEqual(content_length, len(response_body)) self.assertEqual(response_body, tobytes('abcdefgh')) # remote does not close connection (keepalive header) self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line = fp.readline() # status line version, status, reason = (x.strip() for x in line.split(None, 2)) headers = parse_headers(fp) content_length = int(headers.get('content-length')) or None response_body = fp.read(content_length) self.assertEqual(int(status), 200) class NoContentLengthTests(object): def setUp(self): from waitress.tests.fixtureapps import nocl self.start_subprocess(nocl.app) def tearDown(self): self.stop_subprocess() def test_http10_generator(self): body = string.ascii_letters to_send = ("GET / HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: %d\n\n" % len(body)) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(headers.get('content-length'), None) self.assertEqual(headers.get('connection'), 'close') self.assertEqual(response_body, tobytes(body)) # remote closed connection (despite keepalive header), because # generators cannot have a content-length divined self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_http10_list(self): body = string.ascii_letters to_send = ("GET /list HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: %d\n\n" % len(body)) to_send += body to_send = tobytes(to_send) self.connect() 
self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(headers['content-length'], str(len(body))) self.assertEqual(headers.get('connection'), 'Keep-Alive') self.assertEqual(response_body, tobytes(body)) # remote keeps connection open because it divined the content length # from a length-1 list self.sock.send(to_send) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') def test_http10_listlentwo(self): body = string.ascii_letters to_send = ("GET /list_lentwo HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: %d\n\n" % len(body)) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(headers.get('content-length'), None) self.assertEqual(headers.get('connection'), 'close') self.assertEqual(response_body, tobytes(body)) # remote closed connection (despite keepalive header), because # lists of length > 1 cannot have their content length divined self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_http11_generator(self): body = string.ascii_letters to_send = ("GET / HTTP/1.1\n" "Content-Length: %s\n\n" % len(body)) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb') line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') expected = b'' for chunk in chunks(body, 10): expected += tobytes( '%s\r\n%s\r\n' % (str(hex(len(chunk))[2:].upper()), chunk) ) expected += b'0\r\n\r\n' self.assertEqual(response_body, expected) # connection is always closed at the end of a chunked response self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_http11_list(self): body = string.ascii_letters to_send = ("GET /list HTTP/1.1\n" "Content-Length: %d\n\n" % len(body)) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') self.assertEqual(headers['content-length'], str(len(body))) self.assertEqual(response_body, tobytes(body)) # remote keeps connection open because it divined the content length # from a length-1 list self.sock.send(to_send) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') def test_http11_listlentwo(self): body = string.ascii_letters to_send = ("GET /list_lentwo HTTP/1.1\n" "Content-Length: %s\n\n" % len(body)) to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb') line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') expected = b'' for chunk in (body[0], body[1:]): expected += tobytes( '%s\r\n%s\r\n' % (str(hex(len(chunk))[2:].upper()), chunk) ) expected += b'0\r\n\r\n' self.assertEqual(response_body, expected) # connection is always closed at the end of a chunked response self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class WriteCallbackTests(object): def setUp(self): from waitress.tests.fixtureapps import writecb self.start_subprocess(writecb.app) def tearDown(self): self.stop_subprocess() def test_short_body(self): # check to see if server closes connection when body is too short # for cl header 
to_send = tobytes( "GET /short_body HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: 0\n" "\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) # server trusts the content-length header (5) self.assertline(line, '200', 'OK', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, 9) self.assertNotEqual(cl, len(response_body)) self.assertEqual(len(response_body), cl - 1) self.assertEqual(response_body, tobytes('abcdefgh')) # remote closed connection (despite keepalive header) self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_long_body(self): # check server doesnt close connection when body is too long # for cl header to_send = tobytes( "GET /long_body HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: 0\n" "\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) content_length = int(headers.get('content-length')) or None self.assertEqual(content_length, 9) self.assertEqual(content_length, len(response_body)) self.assertEqual(response_body, tobytes('abcdefghi')) # remote does not close connection (keepalive header) self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') def test_equal_body(self): # check server doesnt close connection when body is equal to # cl header to_send = tobytes( "GET /equal_body HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: 0\n" "\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) content_length = int(headers.get('content-length')) or None self.assertEqual(content_length, 9) self.assertline(line, '200', 'OK', 'HTTP/1.0') self.assertEqual(content_length, len(response_body)) self.assertEqual(response_body, tobytes('abcdefghi')) # remote does not close connection (keepalive header) self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') def test_no_content_length(self): # wtf happens when there's no content-length to_send = tobytes( "GET /no_content_length HTTP/1.0\n" "Connection: Keep-Alive\n" "Content-Length: 0\n" "\n" ) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line = fp.readline() # status line line, headers, response_body = read_http(fp) content_length = headers.get('content-length') self.assertEqual(content_length, None) self.assertEqual(response_body, tobytes('abcdefghi')) # remote closed connection (despite keepalive header) self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class TooLargeTests(object): toobig = 1050 def setUp(self): from waitress.tests.fixtureapps import toolarge self.start_subprocess(toolarge.app, max_request_header_size=1000, max_request_body_size=1000) def tearDown(self): self.stop_subprocess() def test_request_body_too_large_with_wrong_cl_http10(self): body = 'a' * self.toobig to_send = ("GET / HTTP/1.0\n" "Content-Length: 5\n\n") to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb') # first request succeeds (content-length 5) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # server trusts the content-length header; no pipelining, # so request 
fulfilled, extra bytes are thrown away # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_wrong_cl_http10_keepalive(self): body = 'a' * self.toobig to_send = ("GET / HTTP/1.0\n" "Content-Length: 5\n" "Connection: Keep-Alive\n\n") to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb') # first request succeeds (content-length 5) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) line, headers, response_body = read_http(fp) self.assertline(line, '431', 'Request Header Fields Too Large', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_no_cl_http10(self): body = 'a' * self.toobig to_send = "GET / HTTP/1.0\n\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # extra bytes are thrown away (no pipelining), connection closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_no_cl_http10_keepalive(self): body = 'a' * self.toobig to_send = "GET / HTTP/1.0\nConnection: Keep-Alive\n\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) # server trusts the content-length header (assumed zero) self.assertline(line, '200', 'OK', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) line, headers, response_body = read_http(fp) # next response overruns because the extra data appears to be # header data self.assertline(line, '431', 'Request Header Fields Too Large', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_wrong_cl_http11(self): body = 'a' * self.toobig to_send = ("GET / HTTP/1.1\n" "Content-Length: 5\n\n") to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb') # first request succeeds (content-length 5) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # second response is an error response line, headers, response_body = read_http(fp) self.assertline(line, '431', 'Request Header Fields Too Large', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_wrong_cl_http11_connclose(self): body = 'a' * self.toobig to_send = "GET / HTTP/1.1\nContent-Length: 5\nConnection: close\n\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) # server trusts the content-length 
header (5) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_no_cl_http11(self): body = 'a' * self.toobig to_send = "GET / HTTP/1.1\n\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb') # server trusts the content-length header (assumed 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # server assumes pipelined requests due to http/1.1, and the first # request was assumed c-l 0 because it had no content-length header, # so entire body looks like the header of the subsequent request # second response is an error response line, headers, response_body = read_http(fp) self.assertline(line, '431', 'Request Header Fields Too Large', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_with_no_cl_http11_connclose(self): body = 'a' * self.toobig to_send = "GET / HTTP/1.1\nConnection: close\n\n" to_send += body to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) # server trusts the content-length header (assumed 0) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_request_body_too_large_chunked_encoding(self): control_line = "20;\r\n" # 20 hex = 32 dec s = 'This string has 32 characters.\r\n' to_send = "GET / HTTP/1.1\nTransfer-Encoding: chunked\n\n" repeat = control_line + s to_send += repeat * ((self.toobig // len(repeat)) + 1) to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) # body bytes counter caught a max_request_body_size overrun self.assertline(line, '413', 'Request Entity Too Large', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) self.assertEqual(headers['content-type'], 'text/plain') # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class InternalServerErrorTests(object): def setUp(self): from waitress.tests.fixtureapps import error self.start_subprocess(error.app, expose_tracebacks=True) def tearDown(self): self.stop_subprocess() def test_before_start_response_http_10(self): to_send = "GET /before_start_response HTTP/1.0\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '500', 'Internal Server Error', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b'Internal Server Error')) self.assertEqual(headers['connection'], 'close') # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_before_start_response_http_11(self): to_send = "GET /before_start_response HTTP/1.1\n\n" to_send = 
tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '500', 'Internal Server Error', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b'Internal Server Error')) self.assertEqual(sorted(headers.keys()), ['content-length', 'content-type', 'date', 'server']) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_before_start_response_http_11_close(self): to_send = tobytes( "GET /before_start_response HTTP/1.1\n" "Connection: close\n\n") self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '500', 'Internal Server Error', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b'Internal Server Error')) self.assertEqual(sorted(headers.keys()), ['connection', 'content-length', 'content-type', 'date', 'server']) self.assertEqual(headers['connection'], 'close') # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_after_start_response_http10(self): to_send = "GET /after_start_response HTTP/1.0\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '500', 'Internal Server Error', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b'Internal Server Error')) self.assertEqual(sorted(headers.keys()), ['connection', 'content-length', 'content-type', 'date', 'server']) self.assertEqual(headers['connection'], 'close') # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_after_start_response_http11(self): to_send = "GET /after_start_response HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '500', 'Internal Server Error', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b'Internal Server Error')) self.assertEqual(sorted(headers.keys()), ['content-length', 'content-type', 'date', 'server']) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_after_start_response_http11_close(self): to_send = tobytes( "GET /after_start_response HTTP/1.1\n" "Connection: close\n\n") self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '500', 'Internal Server Error', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) self.assertTrue(response_body.startswith(b'Internal Server Error')) self.assertEqual(sorted(headers.keys()), ['connection', 'content-length', 'content-type', 'date', 'server']) self.assertEqual(headers['connection'], 'close') # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_after_write_cb(self): to_send = "GET /after_write_cb HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = 
self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') self.assertEqual(response_body, b'') # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_in_generator(self): to_send = "GET /in_generator HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') self.assertEqual(response_body, b'') # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class FileWrapperTests(object): def setUp(self): from waitress.tests.fixtureapps import filewrapper self.start_subprocess(filewrapper.app) def tearDown(self): self.stop_subprocess() def test_filelike_http11(self): to_send = "GET /filelike HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has not been closed def test_filelike_nocl_http11(self): to_send = "GET /filelike_nocl HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has not been closed def test_filelike_shortcl_http11(self): to_send = "GET /filelike_shortcl HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, 1) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377' in response_body) # connection has not been closed def test_filelike_longcl_http11(self): to_send = "GET /filelike_longcl HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has not been closed def test_notfilelike_http11(self): to_send = "GET /notfilelike HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has not been closed def test_notfilelike_nocl_http11(self): to_send = "GET /notfilelike_nocl HTTP/1.1\n\n" 
to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has been closed (no content-length) self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_notfilelike_shortcl_http11(self): to_send = "GET /notfilelike_shortcl HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() for t in range(0, 2): self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, 1) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377' in response_body) # connection has not been closed def test_notfilelike_longcl_http11(self): to_send = "GET /notfilelike_longcl HTTP/1.1\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.1') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body) + 10) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_filelike_http10(self): to_send = "GET /filelike HTTP/1.0\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_filelike_nocl_http10(self): to_send = "GET /filelike_nocl HTTP/1.0\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_notfilelike_http10(self): to_send = "GET /notfilelike HTTP/1.0\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 'HTTP/1.0') cl = int(headers['content-length']) self.assertEqual(cl, len(response_body)) ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has been closed self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) def test_notfilelike_nocl_http10(self): to_send = "GET /notfilelike_nocl HTTP/1.0\n\n" to_send = tobytes(to_send) self.connect() self.sock.send(to_send) fp = self.sock.makefile('rb', 0) line, headers, response_body = read_http(fp) self.assertline(line, '200', 'OK', 
'HTTP/1.0') ct = headers['content-type'] self.assertEqual(ct, 'image/jpeg') self.assertTrue(b'\377\330\377' in response_body) # connection has been closed (no content-length) self.send_check_error(to_send) self.assertRaises(ConnectionClosed, read_http, fp) class TcpEchoTests(EchoTests, TcpTests, unittest.TestCase): pass class TcpPipeliningTests(PipeliningTests, TcpTests, unittest.TestCase): pass class TcpExpectContinueTests(ExpectContinueTests, TcpTests, unittest.TestCase): pass class TcpBadContentLengthTests( BadContentLengthTests, TcpTests, unittest.TestCase): pass class TcpNoContentLengthTests( NoContentLengthTests, TcpTests, unittest.TestCase): pass class TcpWriteCallbackTests(WriteCallbackTests, TcpTests, unittest.TestCase): pass class TcpTooLargeTests(TooLargeTests, TcpTests, unittest.TestCase): pass class TcpInternalServerErrorTests( InternalServerErrorTests, TcpTests, unittest.TestCase): pass class TcpFileWrapperTests(FileWrapperTests, TcpTests, unittest.TestCase): pass if hasattr(socket, 'AF_UNIX'): class FixtureUnixWSGIServer(server.UnixWSGIServer): """A version of UnixWSGIServer that relays back what it's bound to. """ def __init__(self, application, queue, **kw): # pragma: no cover # Coverage doesn't see this as it's ran in a separate process. # To permit parallel testing, use a PID-dependent socket. kw['unix_socket'] = '/tmp/waitress.test-%d.sock' % os.getpid() super(FixtureUnixWSGIServer, self).__init__(application, **kw) queue.put(self.socket.getsockname()) class UnixTests(SubprocessTests): server = FixtureUnixWSGIServer def make_http_connection(self): return UnixHTTPConnection(self.bound_to) def stop_subprocess(self): super(UnixTests, self).stop_subprocess() cleanup_unix_socket(self.bound_to) def send_check_error(self, to_send): # Unlike inet domain sockets, Unix domain sockets can trigger a # 'Broken pipe' error when the socket it closed. try: self.sock.send(to_send) except socket.error as exc: self.assertEqual(get_errno(exc), errno.EPIPE) class UnixEchoTests(EchoTests, UnixTests, unittest.TestCase): pass class UnixPipeliningTests(PipeliningTests, UnixTests, unittest.TestCase): pass class UnixExpectContinueTests( ExpectContinueTests, UnixTests, unittest.TestCase): pass class UnixBadContentLengthTests( BadContentLengthTests, UnixTests, unittest.TestCase): pass class UnixNoContentLengthTests( NoContentLengthTests, UnixTests, unittest.TestCase): pass class UnixWriteCallbackTests( WriteCallbackTests, UnixTests, unittest.TestCase): pass class UnixTooLargeTests(TooLargeTests, UnixTests, unittest.TestCase): pass class UnixInternalServerErrorTests( InternalServerErrorTests, UnixTests, unittest.TestCase): pass class UnixFileWrapperTests(FileWrapperTests, UnixTests, unittest.TestCase): pass def parse_headers(fp): """Parses only RFC2822 headers from a file pointer. """ headers = {} while True: line = fp.readline() if line in (b'\r\n', b'\n', b''): break line = line.decode('iso-8859-1') name, value = line.strip().split(':', 1) headers[name.lower().strip()] = value.lower().strip() return headers class UnixHTTPConnection(httplib.HTTPConnection): """Patched version of HTTPConnection that uses Unix domain sockets. 
""" def __init__(self, path): httplib.HTTPConnection.__init__(self, 'localhost') self.path = path def connect(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(self.path) self.sock = sock class ConnectionClosed(Exception): pass # stolen from gevent def read_http(fp): # pragma: no cover try: response_line = fp.readline() except socket.error as exc: if get_errno(exc) in (10053, 10054, 104): raise ConnectionClosed raise if not response_line: raise ConnectionClosed header_lines = [] while True: line = fp.readline() if line in (b'\r\n', b'\n', b''): break else: header_lines.append(line) headers = dict() for x in header_lines: x = x.strip() if not x: continue key, value = x.split(b': ', 1) key = key.decode('iso-8859-1').lower() value = value.decode('iso-8859-1') assert key not in headers, "%s header duplicated" % key headers[key] = value if 'content-length' in headers: num = int(headers['content-length']) body = b'' left = num while left > 0: data = fp.read(left) if not data: break body += data left -= len(data) else: # read until EOF body = fp.read() return response_line, headers, body # stolen from gevent def get_errno(exc): # pragma: no cover """ Get the error code out of socket.error objects. socket.error in <2.5 does not have errno attribute socket.error in 3.x does not allow indexing access e.args[0] works for all. There are cases when args[0] is not errno. i.e. http://bugs.python.org/issue6471 Maybe there are cases when errno is set, but it is not the first argument? """ try: if exc.errno is not None: return exc.errno except AttributeError: pass try: return exc.args[0] except IndexError: return None def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in range(0, len(l), n): yield l[i:i + n]
SamuelDSR/YouCompleteMe-Win7-GVIM
third_party/waitress/waitress/tests/test_functional.py
Python
gpl-3.0
56,930
import pytest

from services.sf_services.set_hima_test import set_hima_test


def test_set_hima():
    student_id = 23911699
    level_code = "3"
    set_hima_test(student_id, level_code)


if __name__ == "__main__":
    pytest.main()
hongbaby/service-automation
services/unittest/sf_service_test.py
Python
mit
234
# Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import json import os import copy import unittest from collections import defaultdict import pytest from monty.json import MontyDecoder from pymatgen.analysis.phase_diagram import PhaseDiagram from pymatgen.entries.computed_entries import ( CompositionEnergyAdjustment, ComputedEntry, ComputedStructureEntry, ConstantEnergyAdjustment, EnergyAdjustment, GibbsComputedStructureEntry, ManualEnergyAdjustment, TemperatureEnergyAdjustment, ) from pymatgen.io.vasp.outputs import Vasprun from pymatgen.util.testing import PymatgenTest filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "vasprun.xml") vasprun = Vasprun(filepath) def test_energyadjustment(): ea = EnergyAdjustment(10) assert ea.name == "Manual adjustment" assert ea.cls == {} ead = ea.as_dict() ea2 = EnergyAdjustment.from_dict(ead) assert str(ead) == str(ea2.as_dict()) def test_manual_energy_adjustment(): ea = ManualEnergyAdjustment(10) assert ea.name == "Manual energy adjustment" assert ea.value == 10 assert ea.explain == "Manual energy adjustment (10.000 eV)" ead = ea.as_dict() ea2 = ManualEnergyAdjustment.from_dict(ead) assert str(ead) == str(ea2.as_dict()) def test_constant_energy_adjustment(): ea = ConstantEnergyAdjustment(8) assert ea.name == "Constant energy adjustment" assert ea.value == 8 assert ea.explain == "Constant energy adjustment (8.000 eV)" ead = ea.as_dict() ea2 = ConstantEnergyAdjustment.from_dict(ead) assert str(ead) == str(ea2.as_dict()) def test_composition_energy_adjustment(): ea = CompositionEnergyAdjustment(2, 2, uncertainty_per_atom=0, name="H") assert ea.name == "H" assert ea.value == 4 assert ea.explain == "Composition-based energy adjustment (2.000 eV/atom x 2 atoms)" ead = ea.as_dict() ea2 = CompositionEnergyAdjustment.from_dict(ead) assert str(ead) == str(ea2.as_dict()) def test_temp_energy_adjustment(): ea = TemperatureEnergyAdjustment(-0.1, 298, 5, uncertainty_per_deg=0, name="entropy") assert ea.name == "entropy" assert ea.value == -0.1 * 298 * 5 assert ea.n_atoms == 5 assert ea.temp == 298 assert ea.explain == "Temperature-based energy adjustment (-0.1000 eV/K/atom x 298 K x 5 atoms)" ead = ea.as_dict() ea2 = TemperatureEnergyAdjustment.from_dict(ead) assert str(ead) == str(ea2.as_dict()) class ComputedEntryTest(unittest.TestCase): def setUp(self): self.entry = ComputedEntry( vasprun.final_structure.composition, vasprun.final_energy, parameters=vasprun.incar, ) self.entry2 = ComputedEntry({"Fe": 2, "O": 3}, 2.3) self.entry3 = ComputedEntry("Fe2O3", 2.3) self.entry4 = ComputedEntry("Fe2O3", 2.3, entry_id=1) self.entry5 = ComputedEntry("Fe6O9", 6.9) ea = ConstantEnergyAdjustment(-5, name="Dummy adjustment") self.entry6 = ComputedEntry("Fe6O9", 6.9, correction=-10) self.entry7 = ComputedEntry("Fe6O9", 6.9, energy_adjustments=[ea]) def test_energy(self): self.assertAlmostEqual(self.entry.energy, -269.38319884) self.entry.correction = 1.0 self.assertAlmostEqual(self.entry.energy, -268.38319884) self.assertAlmostEqual(self.entry3.energy_per_atom, 2.3 / 5) def test_composition(self): self.assertEqual(self.entry.composition.reduced_formula, "LiFe4(PO4)4") self.assertEqual(self.entry2.composition.reduced_formula, "Fe2O3") self.assertEqual(self.entry5.composition.reduced_formula, "Fe2O3") self.assertEqual(self.entry5.composition.get_reduced_formula_and_factor()[1], 3) def test_per_atom_props(self): entry = ComputedEntry("Fe6O9", 6.9) entry.energy_adjustments.append(CompositionEnergyAdjustment(-0.5, 9, 
uncertainty_per_atom=0.1, name="O")) self.assertAlmostEqual(entry.energy, 2.4) self.assertAlmostEqual(entry.energy_per_atom, 2.4 / 15) self.assertAlmostEqual(entry.uncorrected_energy, 6.9) self.assertAlmostEqual(entry.uncorrected_energy_per_atom, 6.9 / 15) self.assertAlmostEqual(entry.correction, -4.5) self.assertAlmostEqual(entry.correction_per_atom, -4.5 / 15) self.assertAlmostEqual(entry.correction_uncertainty, 0.9) self.assertAlmostEqual(entry.correction_uncertainty_per_atom, 0.9 / 15) def test_normalize(self): entry = ComputedEntry("Fe6O9", 6.9, correction=1) entry_formula = entry.normalize() self.assertEqual(entry_formula.composition.formula, "Fe2 O3") self.assertAlmostEqual(entry_formula.uncorrected_energy, 6.9 / 3) self.assertAlmostEqual(entry_formula.correction, 1 / 3) self.assertAlmostEqual(entry_formula.energy * 3, 6.9 + 1) self.assertAlmostEqual(entry_formula.energy_adjustments[0].value, 1 / 3) entry_atom = entry.normalize("atom") self.assertEqual(entry_atom.composition.formula, "Fe0.4 O0.6") self.assertAlmostEqual(entry_atom.uncorrected_energy, 6.9 / 15) self.assertAlmostEqual(entry_atom.correction, 1 / 15) self.assertAlmostEqual(entry_atom.energy * 15, 6.9 + 1) self.assertAlmostEqual(entry_atom.energy_adjustments[0].value, 1 / 15) def test_normalize_energy_adjustments(self): ealist = [ ManualEnergyAdjustment(5), ConstantEnergyAdjustment(5), CompositionEnergyAdjustment(1, 5, uncertainty_per_atom=0, name="Na"), TemperatureEnergyAdjustment(0.005, 100, 10, uncertainty_per_deg=0), ] entry = ComputedEntry("Na5Cl5", 6.9, energy_adjustments=ealist) assert entry.correction == 20 normed_entry = entry.normalize() assert normed_entry.correction == 4 for ea in normed_entry.energy_adjustments: assert ea.value == 1 def test_to_from_dict(self): d = self.entry.as_dict() e = ComputedEntry.from_dict(d) self.assertEqual(self.entry, e) self.assertAlmostEqual(e.energy, -269.38319884) def test_to_from_dict_with_adjustment(self): """ Legacy case where adjustment was provided manually """ d = self.entry6.as_dict() e = ComputedEntry.from_dict(d) self.assertAlmostEqual(e.uncorrected_energy, 6.9) self.assertEqual(e.energy_adjustments[0].value, self.entry6.energy_adjustments[0].value) def test_to_from_dict_with_adjustment_2(self): """ Modern case where correction was provided manually """ d = self.entry7.as_dict() e = ComputedEntry.from_dict(d) self.assertAlmostEqual(e.uncorrected_energy, 6.9) self.assertEqual(e.energy_adjustments[0].value, self.entry7.energy_adjustments[0].value) def test_to_from_dict_with_adjustment_3(self): """ Legacy case where the entry was serialized before the energy_adjustment attribute was part of ComputedEntry """ # same as entry6 d = { "@module": "pymatgen.entries.computed_entries", "@class": "ComputedEntry", "energy": 6.9, "composition": defaultdict(float, {"Fe": 6.0, "O": 9.0}), "parameters": {}, "data": {}, "entry_id": None, "correction": -10, } e = ComputedEntry.from_dict(d) self.assertAlmostEqual(e.uncorrected_energy, 6.9) self.assertAlmostEqual(e.correction, -10) assert len(e.energy_adjustments) == 1 def test_conflicting_correction_adjustment(self): """ Should raise a ValueError if a user tries to manually set both the correction and energy_adjustment, even if the values match. 
""" ea = ConstantEnergyAdjustment(-10, name="Dummy adjustment") with pytest.raises(ValueError, match="Argument conflict!"): ComputedEntry("Fe6O9", 6.9, correction=-10, energy_adjustments=[ea]) def test_entry_id(self): self.assertEqual(self.entry4.entry_id, 1) self.assertEqual(self.entry2.entry_id, None) def test_str(self): self.assertIsNotNone(str(self.entry)) def test_sulfide_energy(self): self.entry = ComputedEntry("BaS", -10.21249155) self.assertAlmostEqual(self.entry.energy, -10.21249155) self.assertAlmostEqual(self.entry.energy_per_atom, -10.21249155 / 2) self.entry.correction = 1.0 self.assertAlmostEqual(self.entry.energy, -9.21249155) def test_is_element(self): entry = ComputedEntry("Fe3", 2.3) self.assertTrue(entry.is_element) class ComputedStructureEntryTest(unittest.TestCase): def setUp(self): self.entry = ComputedStructureEntry(vasprun.final_structure, vasprun.final_energy, parameters=vasprun.incar) def test_energy(self): self.assertAlmostEqual(self.entry.energy, -269.38319884) self.entry.correction = 1.0 self.assertAlmostEqual(self.entry.energy, -268.38319884) def test_composition(self): self.assertEqual(self.entry.composition.reduced_formula, "LiFe4(PO4)4") def test_to_from_dict(self): d = self.entry.as_dict() e = ComputedStructureEntry.from_dict(d) self.assertEqual(self.entry, e) self.assertAlmostEqual(e.energy, -269.38319884) def test_str(self): self.assertIsNotNone(str(self.entry)) def test_to_from_dict_structure_with_adjustment_3(self): """ Legacy case where the structure entry was serialized before the energy_adjustment attribute was part of ComputedEntry """ # ComputedStructureEntry for Oxygen, mp-12957, as of April 2020 # with an arbitrary 1 eV correction added d = { "@module": "pymatgen.entries.computed_entries", "@class": "ComputedStructureEntry", "energy": -39.42116819, "composition": defaultdict(float, {"O": 8.0}), "parameters": { "run_type": "GGA", "is_hubbard": False, "pseudo_potential": { "functional": "PBE", "labels": ["O"], "pot_type": "paw", }, "hubbards": {}, "potcar_symbols": ["PBE O"], "oxide_type": "None", }, "data": {"oxide_type": "None"}, "entry_id": "mp-12957", "correction": 1, "structure": { "@module": "pymatgen.core.structure", "@class": "Structure", "charge": None, "lattice": { "matrix": [ [-1.7795583, 0.0, 3.86158265], [4.17564656, -3.03266995, -0.01184798], [4.17564656, 3.03266995, -0.01184798], ], "a": 4.251899376264673, "b": 5.160741380296335, "c": 5.160741380296335, "alpha": 71.97975354157973, "beta": 109.9211782454931, "gamma": 109.9211782454931, "volume": 97.67332322031668, }, "sites": [ { "species": [{"element": "O", "occu": 1}], "abc": [0.8531272, 0.15466029, 0.15466029], "xyz": [ -0.22657617390155504, -1.750215367360042e-17, 3.2907563697176516, ], "label": "O", "properties": {"magmom": 0.002}, }, { "species": [{"element": "O", "occu": 1}], "abc": [0.84038763, 0.71790132, 0.21754949], "xyz": [ 2.410593174641884, -1.5174019592685084, 3.234143088794756, ], "label": "O", "properties": {"magmom": -0.002}, }, { "species": [{"element": "O", "occu": 1}], "abc": [0.17255465, 0.21942628, 0.21942628], "xyz": [ 1.5254221229000986, -2.121360826524921e-18, 0.6611345262629937, ], "label": "O", "properties": {"magmom": 0.002}, }, { "species": [{"element": "O", "occu": 1}], "abc": [0.15961237, 0.78245051, 0.28209968], "xyz": [ 4.161145821004675, -1.5173989265985586, 0.6037435893572642, ], "label": "O", "properties": {"magmom": -0.002}, }, { "species": [{"element": "O", "occu": 1}], "abc": [0.84038763, 0.21754949, 0.71790132], "xyz": [ 2.410593174641884, 
1.5174019592685082, 3.234143088794756, ], "label": "O", "properties": {"magmom": -0.002}, }, { "species": [{"element": "O", "occu": 1}], "abc": [0.82744535, 0.78057372, 0.78057372], "xyz": [ 5.046312697099901, -1.3574974398403584e-16, 3.176752163737006, ], "label": "O", "properties": {"magmom": 0.002}, }, { "species": [{"element": "O", "occu": 1}], "abc": [0.15961237, 0.28209968, 0.78245051], "xyz": [ 4.161145821004675, 1.5173989265985584, 0.6037435893572642, ], "label": "O", "properties": {"magmom": -0.002}, }, { "species": [{"element": "O", "occu": 1}], "abc": [0.1468728, 0.84533971, 0.84533971], "xyz": [ 6.798310993901555, -1.7769364890338579e-16, 0.5471303202823484, ], "label": "O", "properties": {"magmom": 0.002}, }, ], }, } e = ComputedEntry.from_dict(d) self.assertAlmostEqual(e.uncorrected_energy, -39.42116819) self.assertAlmostEqual(e.energy, -38.42116819) self.assertAlmostEqual(e.correction, 1) assert len(e.energy_adjustments) == 1 class GibbsComputedStructureEntryTest(unittest.TestCase): def setUp(self): self.temps = [300, 600, 900, 1200, 1500, 1800] self.struct = vasprun.final_structure self.num_atoms = self.struct.composition.num_atoms self.entries_with_temps = { temp: GibbsComputedStructureEntry( self.struct, -2.436, temp=temp, gibbs_model="SISSO", parameters=vasprun.incar, entry_id="test", ) for temp in self.temps } with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "Mn-O_entries.json")) as f: data = json.load(f) with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "structure_CO2.json")) as f: self.co2_struct = MontyDecoder().process_decoded(json.load(f)) self.mp_entries = [MontyDecoder().process_decoded(d) for d in data] def test_gf_sisso(self): energies = { 300: -56.21273010866969, 600: -51.52997063074788, 900: -47.29888391585979, 1200: -42.942338738866304, 1500: -37.793417248809774, 1800: -32.32513382051749, } for t in self.temps: self.assertAlmostEqual(self.entries_with_temps[t].energy, energies[t]) def test_interpolation(self): temp = 450 e = GibbsComputedStructureEntry(self.struct, -2.436, temp=temp) self.assertAlmostEqual(e.energy, -53.7243542548528) def test_expt_gas_entry(self): co2_entry = GibbsComputedStructureEntry(self.co2_struct, 0, temp=900) self.assertAlmostEqual(co2_entry.energy, -16.406560223724014) self.assertAlmostEqual(co2_entry.energy_per_atom, -1.3672133519770011) def test_from_entries(self): gibbs_entries = GibbsComputedStructureEntry.from_entries(self.mp_entries) self.assertIsNotNone(gibbs_entries) def test_from_pd(self): pd = PhaseDiagram(self.mp_entries) gibbs_entries = GibbsComputedStructureEntry.from_pd(pd) self.assertIsNotNone(gibbs_entries) def test_to_from_dict(self): test_entry = self.entries_with_temps[300] d = test_entry.as_dict() e = GibbsComputedStructureEntry.from_dict(d) self.assertEqual(test_entry, e) self.assertAlmostEqual(e.energy, test_entry.energy) def test_str(self): self.assertIsNotNone(str(self.entries_with_temps[300])) def test_normalize(self): for e in self.entries_with_temps.values(): entry = copy.deepcopy(e) normed_entry = entry.normalize(mode="atom") self.assertAlmostEqual(entry.uncorrected_energy, normed_entry.uncorrected_energy * self.num_atoms, 11) if __name__ == "__main__": # import sys;sys.argv = ['', 'Test.testName'] unittest.main()
vorwerkc/pymatgen
pymatgen/entries/tests/test_computed_entries.py
Python
mit
18,404
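The tests above exercise ComputedEntry's two correction styles and normalize(). A minimal usage sketch that mirrors only calls appearing in those tests; the composition, energies and the adjustment name are made-up values, not project data:

from pymatgen.entries.computed_entries import ComputedEntry, ConstantEnergyAdjustment

# Legacy style: a single manual correction folded into the energy.
legacy = ComputedEntry("Fe6O9", 6.9, correction=-10)   # energy == 6.9 - 10

# Modern style: an explicit, named adjustment (passing both ways at once raises ValueError).
modern = ComputedEntry(
    "Fe6O9", 6.9,
    energy_adjustments=[ConstantEnergyAdjustment(-10, name="demo correction")])

# normalize() returns a new entry scaled to the reduced formula; the uncorrected
# energy and every adjustment are divided by the same factor (3 for Fe6O9 -> Fe2O3).
per_formula = modern.normalize()
assert per_formula.composition.formula == "Fe2 O3"
assert abs(per_formula.uncorrected_energy - 6.9 / 3) < 1e-8
assert abs(per_formula.correction - (-10) / 3) < 1e-8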
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test calculations to get interaction answer views.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from core.domain import calculation_registry from core.domain import exp_domain from core.tests import test_utils from extensions.answer_summarizers import models as answer_models class BaseCalculationUnitTests(test_utils.GenericTestBase): """Test cases for BaseCalculation.""" def test_requires_override_for_calculation(self): with self.assertRaises(NotImplementedError): answer_models.BaseCalculation().calculate_from_state_answers_dict( state_answers_dict={}) class CalculationUnitTestBase(test_utils.GenericTestBase): """Utility methods for testing calculations.""" # TODO(brianrodri, msl): Only non-zero answer-counts are tested. Should look # into adding coverage for answers with zero-frequencies. def _create_answer_dict( self, answer, time_spent_in_card=3.2, session_id='sid1', classify_category=exp_domain.EXPLICIT_CLASSIFICATION): return { 'answer': answer, 'time_spent_in_sec': time_spent_in_card, 'session_id': session_id, 'classification_categorization': classify_category, } def _create_state_answers_dict( self, answer_dicts_list, exploration_id='0', exploration_version=1, state_name='Welcome!', interaction_id='MultipleChoiceInput'): """Builds a simple state_answers_dict with optional default values.""" return { 'exploration_id': exploration_id, 'exploration_version': exploration_version, 'state_name': state_name, 'interaction_id': interaction_id, 'submitted_answer_list': answer_dicts_list, } def _get_calculation_instance(self): """Requires the existance of the class constant: CALCULATION_ID.""" if not hasattr(self, 'CALCULATION_ID'): raise NotImplementedError( 'Subclasses must provide a value for CALCULATION_ID.') return calculation_registry.Registry.get_calculation_by_id( self.CALCULATION_ID) def _perform_calculation(self, state_answers_dict): """Performs calculation on state_answers_dict and returns its output.""" calculation_instance = self._get_calculation_instance() state_answers_calc_output = ( calculation_instance.calculate_from_state_answers_dict( state_answers_dict)) self.assertEqual( state_answers_calc_output.calculation_id, self.CALCULATION_ID) return state_answers_calc_output.calculation_output class AnswerFrequenciesUnitTestCase(CalculationUnitTestBase): """Tests for arbitrary answer frequency calculations.""" CALCULATION_ID = 'AnswerFrequencies' def test_top_answers_without_ties(self): # Create 12 answers with different frequencies. 
answers = ( ['A'] * 12 + ['B'] * 11 + ['C'] * 10 + ['D'] * 9 + ['E'] * 8 + ['F'] * 7 + ['G'] * 6 + ['H'] * 5 + ['I'] * 4 + ['J'] * 3 + ['K'] * 2 + ['L']) answer_dicts_list = [self._create_answer_dict(a) for a in answers] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) # All 12 should be sorted. expected_calc_output = [ {'answer': 'A', 'frequency': 12}, {'answer': 'B', 'frequency': 11}, {'answer': 'C', 'frequency': 10}, {'answer': 'D', 'frequency': 9}, {'answer': 'E', 'frequency': 8}, {'answer': 'F', 'frequency': 7}, {'answer': 'G', 'frequency': 6}, {'answer': 'H', 'frequency': 5}, {'answer': 'I', 'frequency': 4}, {'answer': 'J', 'frequency': 3}, {'answer': 'K', 'frequency': 2}, {'answer': 'L', 'frequency': 1}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) def test_answers_with_ties(self): """Ties are resolved by submission ordering: earlier ranks higher.""" answers = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L'] answer_dicts_list = [self._create_answer_dict(a) for a in answers] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) # All 12 should appear in-order. expected_calc_output = [ {'answer': 'A', 'frequency': 1}, {'answer': 'B', 'frequency': 1}, {'answer': 'C', 'frequency': 1}, {'answer': 'D', 'frequency': 1}, {'answer': 'E', 'frequency': 1}, {'answer': 'F', 'frequency': 1}, {'answer': 'G', 'frequency': 1}, {'answer': 'H', 'frequency': 1}, {'answer': 'I', 'frequency': 1}, {'answer': 'J', 'frequency': 1}, {'answer': 'K', 'frequency': 1}, {'answer': 'L', 'frequency': 1}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) class Top5AnswerFrequenciesUnitTestCase(CalculationUnitTestBase): """Tests for Top 5 answer frequency calculations.""" CALCULATION_ID = 'Top5AnswerFrequencies' def test_top5_without_ties(self): """Simplest case: ordering is obvious.""" # Create 12 answers with different frequencies. answers = ( ['A'] * 12 + ['B'] * 11 + ['C'] * 10 + ['D'] * 9 + ['E'] * 8 + ['F'] * 7 + ['G'] * 6 + ['H'] * 5 + ['I'] * 4 + ['J'] * 3 + ['K'] * 2 + ['L']) answer_dicts_list = [self._create_answer_dict(a) for a in answers] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) # Only top 5 are kept. expected_calc_output = [ {'answer': 'A', 'frequency': 12}, {'answer': 'B', 'frequency': 11}, {'answer': 'C', 'frequency': 10}, {'answer': 'D', 'frequency': 9}, {'answer': 'E', 'frequency': 8}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) def test_top5_with_ties(self): """Ties are resolved by submission ordering: earlier ranks higher.""" # Create 12 answers with same frequencies. answers = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L'] answer_dicts_list = [self._create_answer_dict(a) for a in answers] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) # Only first 5 are kept. 
expected_calc_output = [ {'answer': 'A', 'frequency': 1}, {'answer': 'B', 'frequency': 1}, {'answer': 'C', 'frequency': 1}, {'answer': 'D', 'frequency': 1}, {'answer': 'E', 'frequency': 1}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) class Top10AnswerFrequenciesUnitTestCase(CalculationUnitTestBase): """Tests for Top 10 answer frequency calculations.""" CALCULATION_ID = 'Top10AnswerFrequencies' def test_top10_answers_without_ties(self): # Create 12 answers with different frequencies. answers = ( ['A'] * 12 + ['B'] * 11 + ['C'] * 10 + ['D'] * 9 + ['E'] * 8 + ['F'] * 7 + ['G'] * 6 + ['H'] * 5 + ['I'] * 4 + ['J'] * 3 + ['K'] * 2 + ['L']) answer_dicts_list = [self._create_answer_dict(a) for a in answers] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) # Only top 10 are kept. expected_calc_output = [ {'answer': 'A', 'frequency': 12}, {'answer': 'B', 'frequency': 11}, {'answer': 'C', 'frequency': 10}, {'answer': 'D', 'frequency': 9}, {'answer': 'E', 'frequency': 8}, {'answer': 'F', 'frequency': 7}, {'answer': 'G', 'frequency': 6}, {'answer': 'H', 'frequency': 5}, {'answer': 'I', 'frequency': 4}, {'answer': 'J', 'frequency': 3}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) def test_top10_with_ties(self): """Ties are resolved by submission ordering: earlier ranks higher.""" # Create 12 answers with same frequencies. answers = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L'] answer_dicts_list = [self._create_answer_dict(a) for a in answers] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) # Only first 10 are kept. 
expected_calc_output = [ {'answer': 'A', 'frequency': 1}, {'answer': 'B', 'frequency': 1}, {'answer': 'C', 'frequency': 1}, {'answer': 'D', 'frequency': 1}, {'answer': 'E', 'frequency': 1}, {'answer': 'F', 'frequency': 1}, {'answer': 'G', 'frequency': 1}, {'answer': 'H', 'frequency': 1}, {'answer': 'I', 'frequency': 1}, {'answer': 'J', 'frequency': 1}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) class FrequencyCommonlySubmittedElementsUnitTestCase(CalculationUnitTestBase): """This calculation only works on answers which are all lists.""" CALCULATION_ID = 'FrequencyCommonlySubmittedElements' def test_shared_answers(self): answer_dicts_list = [ self._create_answer_dict(['B', 'A']), self._create_answer_dict(['A', 'C']), self._create_answer_dict(['D']), self._create_answer_dict(['B', 'A']), ] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) expected_calc_output = [ {'answer': 'A', 'frequency': 3}, {'answer': 'B', 'frequency': 2}, {'answer': 'C', 'frequency': 1}, {'answer': 'D', 'frequency': 1}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) def test_many_shared_answers(self): answers = ( ['A'] * 12 + ['B'] * 11 + ['C'] * 10 + ['D'] * 9 + ['E'] * 8 + ['F'] * 7 + ['G'] * 6 + ['H'] * 5 + ['I'] * 4 + ['J'] * 3 + ['K'] * 2 + ['L']) split_len = len(answers) // 4 answer_dicts_list = [ self._create_answer_dict(answers[:split_len * 1]), self._create_answer_dict(answers[split_len * 1:split_len * 2]), self._create_answer_dict(answers[split_len * 2:split_len * 3]), self._create_answer_dict(answers[split_len * 3:]), ] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) # Only top 10 are kept. expected_calc_output = [ {'answer': 'A', 'frequency': 12}, {'answer': 'B', 'frequency': 11}, {'answer': 'C', 'frequency': 10}, {'answer': 'D', 'frequency': 9}, {'answer': 'E', 'frequency': 8}, {'answer': 'F', 'frequency': 7}, {'answer': 'G', 'frequency': 6}, {'answer': 'H', 'frequency': 5}, {'answer': 'I', 'frequency': 4}, {'answer': 'J', 'frequency': 3}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) class TopAnswersByCategorizationUnitTestCase(CalculationUnitTestBase): CALCULATION_ID = 'TopAnswersByCategorization' def test_empty_state_answers_dict(self): state_answers_dict = self._create_state_answers_dict([]) actual_calc_output = self._perform_calculation(state_answers_dict) expected_calc_output = {} self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) def test_only_one_category(self): answer_dicts_list = [ self._create_answer_dict( 'Hard A', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), ] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) expected_calc_output = { 'explicit': [{'answer': 'Hard A', 'frequency': 1}], } self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) def test_many_categories(self): answer_dicts_list = [ # EXPLICIT. self._create_answer_dict( 'Explicit A', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), self._create_answer_dict( 'Explicit B', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), self._create_answer_dict( 'Explicit A', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), # TRAINING DATA. 
self._create_answer_dict( 'Trained data A', classify_category=exp_domain.TRAINING_DATA_CLASSIFICATION), self._create_answer_dict( 'Trained data B', classify_category=exp_domain.TRAINING_DATA_CLASSIFICATION), self._create_answer_dict( 'Trained data B', classify_category=exp_domain.TRAINING_DATA_CLASSIFICATION), # STATS CLASSIFIER. self._create_answer_dict( 'Stats B', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), self._create_answer_dict( 'Stats C', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), self._create_answer_dict( 'Stats C', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), self._create_answer_dict( 'Trained data B', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), # DEFAULT OUTCOMES. self._create_answer_dict( 'Default C', classify_category=exp_domain.DEFAULT_OUTCOME_CLASSIFICATION), self._create_answer_dict( 'Default C', classify_category=exp_domain.DEFAULT_OUTCOME_CLASSIFICATION), self._create_answer_dict( 'Default B', classify_category=exp_domain.DEFAULT_OUTCOME_CLASSIFICATION), ] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) expected_calc_output = { 'explicit': [ {'answer': 'Explicit A', 'frequency': 2}, {'answer': 'Explicit B', 'frequency': 1}, ], 'training_data_match': [ {'answer': 'Trained data B', 'frequency': 2}, {'answer': 'Trained data A', 'frequency': 1}, ], 'statistical_classifier': [ {'answer': 'Stats C', 'frequency': 2}, {'answer': 'Stats B', 'frequency': 1}, {'answer': 'Trained data B', 'frequency': 1}, ], 'default_outcome': [ {'answer': 'Default C', 'frequency': 2}, {'answer': 'Default B', 'frequency': 1}, ], } self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) class TopNUnresolvedAnswersByFrequency(CalculationUnitTestBase): CALCULATION_ID = 'TopNUnresolvedAnswersByFrequency' def test_empty_state_answers_dict(self): state_answers_dict = self._create_state_answers_dict([]) actual_calc_output = self._perform_calculation(state_answers_dict) expected_calc_output = [] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output) def test_unresolved_answers_list(self): answer_dicts_list = [ # EXPLICIT. self._create_answer_dict( 'Explicit A', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), self._create_answer_dict( 'Explicit B', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), self._create_answer_dict( 'Explicit A', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), # TRAINING DATA. self._create_answer_dict( 'Trained data A', classify_category=exp_domain.TRAINING_DATA_CLASSIFICATION), self._create_answer_dict( 'Trained data B', classify_category=exp_domain.TRAINING_DATA_CLASSIFICATION), self._create_answer_dict( 'Trained data B', classify_category=exp_domain.TRAINING_DATA_CLASSIFICATION), # STATS CLASSIFIER. self._create_answer_dict( 'Stats B', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), self._create_answer_dict( 'Stats C', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), self._create_answer_dict( 'Stats C', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), self._create_answer_dict( 'Explicit B', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), # EXPLICIT. self._create_answer_dict( 'Trained data B', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), # DEFAULT OUTCOMES. 
self._create_answer_dict( 'Default C', classify_category=exp_domain.DEFAULT_OUTCOME_CLASSIFICATION), self._create_answer_dict( 'Default C', classify_category=exp_domain.DEFAULT_OUTCOME_CLASSIFICATION), self._create_answer_dict( 'Default B', classify_category=exp_domain.DEFAULT_OUTCOME_CLASSIFICATION), # EXPLICIT. self._create_answer_dict( 'Default B', classify_category=exp_domain.EXPLICIT_CLASSIFICATION), # STATS CLASSIFIER. self._create_answer_dict( 'Default B', classify_category=exp_domain.STATISTICAL_CLASSIFICATION), ] state_answers_dict = self._create_state_answers_dict(answer_dicts_list) actual_calc_output = self._perform_calculation(state_answers_dict) expected_calc_output = [ {'answer': 'Default B', 'frequency': 3}, {'answer': 'Explicit B', 'frequency': 2}, {'answer': 'Stats C', 'frequency': 2}, {'answer': 'Default C', 'frequency': 2}, {'answer': 'Stats B', 'frequency': 1}, ] self.assertEqual(actual_calc_output.to_raw_type(), expected_calc_output)
prasanna08/oppia
core/tests/build_sources/extensions/models_test.py
Python
apache-2.0
20,181
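Every frequency calculation tested above encodes the same ranking rule: order answers by frequency, break ties by order of first submission, and optionally keep only the top N. A standalone sketch of that rule, for illustration only and not Oppia's actual implementation:

from collections import Counter

def top_answers(answers, limit=None):
    # Rank answers by frequency; ties keep first-submission order because
    # Counter remembers insertion order and Python's sort is stable.
    ranked = sorted(Counter(answers).items(), key=lambda kv: kv[1], reverse=True)
    if limit is not None:
        ranked = ranked[:limit]
    return [{'answer': a, 'frequency': f} for a, f in ranked]

submissions = ['A', 'A', 'B', 'C', 'B']
assert top_answers(submissions, limit=2) == [
    {'answer': 'A', 'frequency': 2},   # ties with B, but A was submitted first
    {'answer': 'B', 'frequency': 2},
]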
# -*- coding: utf-8 -*- # # # TheVirtualBrain-Framework Package. This package holds all Data Management, and # Web-UI helpful to run brain-simulations. To use it, you also need do download # TheVirtualBrain-Scientific Package (for simulators). See content of the # documentation-folder for more details. See also http://www.thevirtualbrain.org # # (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest") # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License version 2 as published by the Free # Software Foundation. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. You should have received a copy of the GNU General # Public License along with this program; if not, you can download it here # http://www.gnu.org/licenses/old-licenses/gpl-2.0 # # # CITATION: # When using The Virtual Brain for scientific publications, please cite it as follows: # # Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide, # Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013) # The Virtual Brain: a simulator of primate brain network dynamics. # Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010) # # """ .. moduleauthor:: Calin Pavel <[email protected]> """ import unittest import os import numpy as numpy from tvb.core.entities.file.files_helper import FilesHelper from tvb_test.datatypes.datatypes_factory import DatatypesFactory from tvb_test.core.base_testcase import TransactionalTestCase from tvb.core.entities.storage import dao from tvb.core.entities.transient.structure_entities import DataTypeMetaData from tvb.core.services.flow_service import FlowService from tvb.core.adapters.abcadapter import ABCAdapter from tvb.datatypes.time_series import TimeSeries from tvb.core.services.exceptions import OperationException import demo_data.nifti as demo_data class NIFTIImporterTest(TransactionalTestCase): """ Unit-tests for NIFTI importer. """ NII_FILE = os.path.join(os.path.dirname(demo_data.__file__), 'minimal.nii') GZ_NII_FILE = os.path.join(os.path.dirname(demo_data.__file__), 'minimal.nii.gz') TVB_NII_FILE = os.path.join(os.path.dirname(demo_data.__file__), 'tvb_nifti_demo_data.nii.gz') WRONG_NII_FILE = os.path.abspath(__file__) DEFAULT_ORIGIN = [[0.0, 0.0, 0.0]] UNKNOWN_STR = "unknown" def setUp(self): self.datatypeFactory = DatatypesFactory() self.test_project = self.datatypeFactory.get_project() self.test_user = self.datatypeFactory.get_user() def tearDown(self): """ Clean-up tests data """ FilesHelper().remove_project_structure(self.test_project.name) def _import(self, import_file_path=None): """ This method is used for importing data in NIFIT format :param import_file_path: absolute path of the file to be imported """ ### Retrieve Adapter instance group = dao.find_group('tvb.adapters.uploaders.nifti_importer', 'NIFTIImporter') importer = ABCAdapter.build_adapter(group) importer.meta_data = {DataTypeMetaData.KEY_SUBJECT: "", DataTypeMetaData.KEY_STATE: "RAW"} args = {'data_file': import_file_path} ### Launch import Operation FlowService().fire_operation(importer, self.test_user, self.test_project.id, **args) time_series = TimeSeries() data_types = FlowService().get_available_datatypes(self.test_project.id, time_series.module + "." 
+ time_series.type) self.assertEqual(1, len(data_types), "Project should contain only one data type.") time_series = ABCAdapter.load_entity_by_gid(data_types[0][2]) self.assertTrue(time_series is not None, "TimeSeries should not be none") return time_series def test_import_demo_nii_data(self): """ This method tests import of a NIFTI file. """ time_series = self._import(self.TVB_NII_FILE) # Since self.assertAlmostEquals is not available on all machine # We compare floats as following self.assertTrue(abs(2.0 - time_series.sample_period) <= 0.001) self.assertEqual("sec", str(time_series.sample_period_unit)) self.assertEqual(0.0, time_series.start_time) self.assertTrue(time_series.title is not None) data_shape = time_series.read_data_shape() self.assertEquals(4, len(data_shape)) # We have only one entry for time dimension self.assertEqual(150, data_shape[0]) dimension_labels = time_series.labels_ordering self.assertTrue(dimension_labels is not None) self.assertEquals(4, len(dimension_labels)) volume = time_series.volume self.assertTrue(volume is not None) self.assertTrue(numpy.equal(self.DEFAULT_ORIGIN, volume.origin).all()) self.assertEquals("mm", volume.voxel_unit) def test_import_nii_without_time_dimension(self): """ This method tests import of a NIFTI file. """ time_series = self._import(self.NII_FILE) self.assertEqual(1.0, time_series.sample_period) self.assertEqual(self.UNKNOWN_STR, str(time_series.sample_period_unit)) self.assertEqual(0.0, time_series.start_time) self.assertTrue(time_series.title is not None) data_shape = time_series.read_data_shape() self.assertEquals(4, len(data_shape)) # We have only one entry for time dimension self.assertEqual(1, data_shape[0]) dimension_labels = time_series.labels_ordering self.assertTrue(dimension_labels is not None) self.assertEquals(4, len(dimension_labels)) volume = time_series.volume self.assertTrue(volume is not None) self.assertTrue(numpy.equal(self.DEFAULT_ORIGIN, volume.origin).all()) self.assertTrue(numpy.equal([3.0, 3.0, 3.0], volume.voxel_size).all()) self.assertEquals(self.UNKNOWN_STR, volume.voxel_unit) def test_import_nifti_compressed(self): """ This method tests import of a NIFTI file compressed in GZ format. """ time_series = self._import(self.GZ_NII_FILE) self.assertEqual(1.0, time_series.sample_period) self.assertEqual(self.UNKNOWN_STR, str(time_series.sample_period_unit)) self.assertEqual(0.0, time_series.start_time) self.assertTrue(time_series.title is not None) data_shape = time_series.read_data_shape() self.assertEquals(4, len(data_shape)) # We have only one entry for time dimension self.assertEqual(1, data_shape[0]) dimension_labels = time_series.labels_ordering self.assertTrue(dimension_labels is not None) self.assertEquals(4, len(dimension_labels)) volume = time_series.volume self.assertTrue(volume is not None) self.assertTrue(numpy.equal(self.DEFAULT_ORIGIN, volume.origin).all()) self.assertTrue(numpy.equal([3.0, 3.0, 3.0], volume.voxel_size).all()) self.assertEquals(self.UNKNOWN_STR, volume.voxel_unit) def test_import_wrong_nii_file(self): """ This method tests import of a file in a wrong format """ try: self._import(self.WRONG_NII_FILE) self.fail("Import should fail in case of a wrong NIFTI format.") except OperationException: # Expected exception pass def suite(): """ Gather all the tests in a test suite. """ test_suite = unittest.TestSuite() test_suite.addTest(unittest.makeSuite(NIFTIImporterTest)) return test_suite if __name__ == "__main__": #So you can run tests from this package individually. 
    TEST_RUNNER = unittest.TextTestRunner()
    TEST_SUITE = suite()
    TEST_RUNNER.run(TEST_SUITE)
stuart-knock/tvb-framework
tvb_test/adapters/uploaders/nifti_importer_test.py
Python
gpl-2.0
8,124
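The importer tests above rely on pre-built demo files (minimal.nii, minimal.nii.gz, tvb_nifti_demo_data.nii.gz) shipped with demo_data.nifti. Should a similar single-volume fixture ever need to be regenerated, a sketch along these lines would do; nibabel is an assumption here and is not used by the test itself:

# Hedged sketch, not part of the TVB test suite: build a tiny single-volume NIfTI file.
import numpy as np
import nibabel as nib

data = np.zeros((4, 4, 4), dtype=np.int16)        # no time dimension, like minimal.nii
img = nib.Nifti1Image(data, affine=np.eye(4))
nib.save(img, "minimal_generated.nii")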
# Fabfile to: # - install kismet # - list kismet interfaces # - start kismet # Import Fabric's API module from fabric.api import * from fabric.contrib.files import exists # User ssh config files env.use_ssh_config = 'True' # Set host list env.roledefs = { 'ntfk_via_te-ace-02': { 'hosts': [ 'ntfk0001.via.te-ace-02', #'ntfk0002.via.te-ace-02', 'ntfk0003.via.te-ace-02', 'ntfk0004.via.te-ace-02', 'ntfk0005.via.te-ace-02', 'ntfk0006.via.te-ace-02', 'ntfk0007.via.te-ace-02', 'ntfk0008.via.te-ace-02', 'ntfk0009.via.te-ace-02', 'ntfk0010.via.te-ace-02', 'ntfk0011.via.te-ace-02', 'ntfk0012.via.te-ace-02', 'ntfk0013.via.te-ace-02', ], }, 'ntfk_172': { 'hosts': [ # '172.16.1.188', '172.16.1.180', '172.16.1.194', '172.16.1.189', '172.16.1.190', '172.16.1.182', '172.16.1.197', '172.16.1.181', # '172.16.1.185', '172.16.1.179', '172.16.1.187', '172.16.1.186', ], }, 'ntfk_via_gateway': { 'hosts': [ 'ntfk@localhost:10001', 'ntfk@localhost:10002', 'ntfk@localhost:10003', 'ntfk@localhost:10004', 'ntfk@localhost:10005', 'ntfk@localhost:10006', 'ntfk@localhost:10007', 'ntfk@localhost:10008', 'ntfk@localhost:10009', 'ntfk@localhost:10010', 'ntfk@localhost:10011', 'ntfk@localhost:10012', 'ntfk@localhost:10013', ], }, } # Set the username env.user = "root" # Set the password [NOT RECOMMENDED] # env.password = "passwd" @parallel def cmd(command): """ Run a command """ run(command) @parallel def killall(process): """ Killall [process] """ run("killall %s" % (process)) def check_hostname(): """ Check hostname """ run("hostname") @parallel def run_bg(cmd, before=None, sockname="dtach", use_sudo=False): """Run a command in the background using dtach :param cmd: The command to run :param output_file: The file to send all of the output to. :param before: The command to run before the dtach. E.g. exporting environment variable :param sockname: The socket name to use for the temp file :param use_sudo: Whether or not to use sudo """ if not exists("/usr/bin/dtach"): sudo("apt-get install dtach") if before: cmd = "{}; dtach -n $(mktemp -u /tmp/{}.XXXX) {}".format( before, sockname, cmd) else: cmd = "dtach -n $(mktemp -u /tmp/{}.XXXX) {}".format(sockname, cmd) if use_sudo: return sudo(cmd) else: return run(cmd) def install_dependencies(): """ Installing Dependencies... """ run("apt -y install build-essential git libmicrohttpd-dev pkg-config zlib1g-dev libnl-3-dev libnl-genl-3-dev libcap-dev libpcap-dev libncurses5-dev libnm-dev libdw-dev libsqlite3-dev libprotobuf-dev libprotobuf-c-dev protobuf-compiler protobuf-c-compiler") def git_kismet(): """ Giting Kismt... """ run("git clone https://www.kismetwireless.net/git/kismet.git") def compile_kismet(): """ Compiling Kismet """ with cd('kismet'): run("export ALL_PROXY=formauth.basf.net:9090") run("./configure && make -j2 && make suidinstall") run("usermod -a -G kismet ntfk") def git_install_kismet(): """ Dependencies, git, compile... 
""" install_dependencies() git_kismet() compile_kismet() def list_kismet_wifi(): """ Listing Kismet WiFi Interfaces """ env.kismet_bindir = '/usr/local/bin' env.kismet_command = 'kismet_cap_linux_wifi' run("%s/%s --list" % (env.kismet_bindir, env.kismet_command)) def list_kismet_bluetooth(): """ Listing Kismet Bluetooth Interfaces """ env.kismet_bindir = '/usr/local/bin' env.kismet_command = 'kismet_cap_linux_bluetooth' run("%s/%s --list" % (env.kismet_bindir, env.kismet_command)) def list_kismet_interfaces(): """ Listing All Kismet Interfaces """ list_kismet_wifi() list_kismet_bluetooth() def start_kismet_wifi(): """ Start Kismet Wifi """ env.kismet_bindir = '/usr/local/bin' env.kismet_command = 'kismet_cap_linux_wifi' env.kismet_server = '10.216.49.201:3501' env.kismet_source = 'wlan2' env.kismet_type = 'rt2800usb' run("killall %s" % (env.kismet_command)) run_bg(cmd="%s/%s --connect=%s --source=%s:type=%s,name=$(hostname)_%s " % (env.kismet_bindir, env.kismet_command, env.kismet_server, env.kismet_source, env.kismet_type, env.kismet_source)) def start_kismet_bluetooth(): """ Start Kismet Bluetooth """ env.kismet_bindir = '/usr/local/bin' env.kismet_command = 'kismet_cap_linux_bluetooth' env.kismet_server = '10.216.49.201:3501' env.kismet_source = 'hci0' env.kismet_type = 'linuxhci' run("killall %s" % (env.kismet_command)) run_bg(cmd="%s/%s --connect=%s --source=%s:type=%s,name=$(hostname)_%s " % (env.kismet_bindir, env.kismet_command, env.kismet_server, env.kismet_source, env.kismet_type, env.kismet_source)) def test_new_stuff(): """ Test new stuff... """ env.kismet_bindir = '/usr/local/bin' env.kismet_command = 'kismet_cap_linux_bluetooth' env.kismet_server = '10.216.49.201:3501' env.kismet_source = 'hci0' env.kismet_type = 'linuxhci' #run("%(kismet_bindir)/%(kismet_command) --list" % env) #run("echo %(kismet_bindir)/%(kismet_command) --connect=%s --source=%(kismet_source):type=%(kismet_type),name=$(hostname)_%(kismet_source) --daemonize" % env ) #run("%s/%s --connect=%s --source=%s:type=%s,name=$(hostname)_%s --daemonize " % (env.kismet_bindir, env.kismet_command, env.kismet_server, env.kismet_source, env.kismet_type, env.kismet_source), pty=False) #run("echo nohup %s/%s --connect=%s --source=%s:type=%s,name=$(hostname)_%s --daemonize " % (env.kismet_bindir, env.kismet_command, env.kismet_server, env.kismet_source, env.kismet_type, env.kismet_source)) #run("nohup %s/%s --connect=%s --source=%s:type=%s,name=$(hostname)_%s --daemonize " % (env.kismet_bindir, env.kismet_command, env.kismet_server, env.kismet_source, env.kismet_type, env.kismet_source)) run("killall %s" % (env.kismet_command)) run_bg(cmd="%s/%s --connect=%s --source=%s:type=%s,name=$(hostname)_%s " % (env.kismet_bindir, env.kismet_command, env.kismet_server, env.kismet_source, env.kismet_type, env.kismet_source))
boisgada/NTFKit
fabric/kismet_fabfile.py
Python
gpl-3.0
7,243
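These Fabric 1.x tasks are normally driven from the fab CLI against one of the roles in env.roledefs, for example: fab -R ntfk_172 list_kismet_interfaces, or fab -R ntfk_via_gateway start_kismet_wifi. The same can be scripted with fabric.api.execute; importing the fabfile as a module, and the role name chosen below, are assumptions:

# Sketch only: module import name and role are assumptions, not part of the fabfile.
from fabric.api import execute
import kismet_fabfile as tasks

# Equivalent to `fab -R ntfk_172 <task>`: run the task on every host in the role.
execute(tasks.list_kismet_interfaces, roles=['ntfk_172'])
execute(tasks.start_kismet_wifi, roles=['ntfk_172'])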
#!/usr/bin/env python3

from PIL import Image
import os.path
import sys

if __name__ == '__main__':
    img = Image.open(sys.argv[1])
    img.load()

    name = os.path.splitext(os.path.basename(sys.argv[1]))[0]

    frames = 0
    for i in range(65536):
        try:
            img.seek(i)
        except EOFError:
            break
        frames += 1

    for n in range(frames):
        print('static u_char _%s_frame%d[] = {' % (name, n))
        img.seek(n)
        pix = img.load()
        for y in range(img.size[1]):
            line = []
            p = 1
            for x in range(img.size[0]):
                if p == pix[x, y]:
                    continue
                p = pix[x, y]
                line.append(x)
            line.insert(0, len(line))
            print(' %s,' % ', '.join(map(str, line)))
        print('};')
        print('')

    print('static AnimSpanT %s = {' % name)
    print(' .width = %d, .height = %d,' % img.size)
    print(' .current = 0, .count = %d,' % frames)
    print(' .frame = {')
    for n in range(frames):
        print(' _%s_frame%d,' % (name, n))
    print(' }')
    print('};')
cahirwpz/demoscene
effects/anim/data/gen-anim.py
Python
artistic-2.0
1,148
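gen-anim.py writes C source to stdout: one _<name>_frameN byte array per frame (each image row encoded as a length-prefixed list of x positions where the palette index changes) plus an AnimSpanT struct referencing the frames. It is typically invoked as ./gen-anim.py something.gif > something-anim.c; a small driver sketch with placeholder file names:

# Placeholder file names; only illustrates redirecting the generator's stdout.
import subprocess

for gif in ["bounce.gif", "walk.gif"]:
    out_path = gif.rsplit(".", 1)[0] + "-anim.c"
    with open(out_path, "w") as out:
        subprocess.run(["./gen-anim.py", gif], stdout=out, check=True)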
from model.group import Group


def test_add_group(app, db, json_groups, check_ui):
    group = json_groups
    old_groups = db.get_group_list()
    app.group.create(group)
    # Simple check: make sure the new list is one group longer than the old one.
    new_groups = db.get_group_list()
    old_groups.append(group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
    if check_ui:
        assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
Lenkora/python_training
test/test_add_group.py
Python
apache-2.0
726
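The assertions sort both lists with Group.id_or_max so that the freshly created group, which has no database id yet, compares after every persisted group. The Group model itself is not part of this file; the sort key is commonly defined along these lines (a sketch, not the repository's actual model/group.py):

# Assumed shape of the sort key used above; not taken from model/group.py.
import sys

class Group:
    def __init__(self, name=None, header=None, footer=None, id=None):
        self.name = name
        self.header = header
        self.footer = footer
        self.id = id

    def id_or_max(self):
        # Persisted groups sort by numeric id; a brand-new group without an id sorts last.
        return int(self.id) if self.id else sys.maxsize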