max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
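Each row below pairs a source-file path and repository with the raw file content and two quality scores. A minimal sketch of loading such a dump for analysis with pandas; the Parquet file name is a hypothetical stand-in for however this table was exported:

```python
import pandas as pd

# Hypothetical export of the rows shown below; point this at the real file.
df = pd.read_parquet("code_quality_sample.parquet")

# Keep the higher-scored files and peek at one of them.
good = df[df["int_score"] >= 2]
print(good[["max_stars_repo_path", "max_stars_count", "score"]].head())
print(good.iloc[0]["content"][:200])
```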
test2/test2.py | kubatom/my_nemtiko_repo | 0 | 7100 | print('this is a test2 file')
| 0.96875 | 1 |
Source/Git/Experiments/git_annotate.py | cadappl/scm-workbench | 24 | 7101 | <reponame>cadappl/scm-workbench
#!/usr/bin/python3
import sys
import git
r = git.Repo( sys.argv[1] )
num = 0
for info in r.blame( 'HEAD', sys.argv[2] ):
num += 1
commit = info[0]
all_lines = info[1]
print( '%s %6d:%s' % (commit, num, all_lines[0]) )
for line in all_lines[1:]:
num += 1
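        # '%*s' % (40, '', ...) pads the empty string to width 40 so these
        # continuation lines stay aligned under the 40-character commit hash above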
print( '%*s %6d:%s' % (40, '', num, line) )
| 2.34375 | 2 |
configs/global_configs.py | HansikaPH/time-series-forecasting | 67 | 7102 | <gh_stars>10-100
# configs for the model training
class model_training_configs:
VALIDATION_ERRORS_DIRECTORY = 'results/validation_errors/'
INFO_FREQ = 1
# configs for the model testing
class model_testing_configs:
RNN_FORECASTS_DIRECTORY = 'results/rnn_forecasts/'
RNN_ERRORS_DIRECTORY = 'results/errors'
PROCESSED_RNN_FORECASTS_DIRECTORY = '/results/processed_rnn_forecasts/'
# configs for hyperparameter tuning(SMAC3)
class hyperparameter_tuning_configs:
SMAC_RUNCOUNT_LIMIT = 50
class gpu_configs:
log_device_placement = False
| 1.398438 | 1 |
openprocurement/blade/tests/auctions.py | imaginal/openprocurement.blade | 0 | 7103 | # -*- coding: utf-8 -*-
import unittest
from uuid import uuid4
from copy import deepcopy
from openprocurement.api.models import get_now
from openprocurement.edge.tests.base import AuctionBaseWebTest, test_award, test_auction_data, test_document, ROUTE_PREFIX
try:
import openprocurement.auctions.core as auctions_core
except ImportError:
auctions_core = None
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionResourceTest(AuctionBaseWebTest):
def test_empty_listing(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/auctions?opt_jsonp=callback')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions?opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions?opt_jsonp=callback&opt_pretty=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions?offset=2015-01-01T00:00:00+02:00&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
response = self.app.get('/auctions?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertEqual(response.json['next_page']['offset'], '')
self.assertNotIn('prev_page', response.json)
response = self.app.get('/auctions?feed=changes&offset=0', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Offset expired/invalid', u'location': u'params', u'name': u'offset'}
])
response = self.app.get('/auctions?feed=changes&descending=1&limit=10')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], [])
self.assertIn('descending=1', response.json['next_page']['uri'])
self.assertIn('limit=10', response.json['next_page']['uri'])
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertIn('limit=10', response.json['prev_page']['uri'])
def test_listing(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
for i in range(3):
offset = get_now().isoformat()
auctions.append(self.create_auction())
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
while True:
response = self.app.get('/auctions?offset={}'.format(offset))
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/auctions', params=[('opt_fields', 'status,enquiryPeriod')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod']))
self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri'])
response = self.app.get('/auctions?descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True))
response = self.app.get('/auctions?descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_auction_data2 = test_auction_data.copy()
test_auction_data2['mode'] = 'test'
self.create_auction(test_auction_data2)
while True:
response = self.app.get('/auctions?mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_changes(self):
response = self.app.get('/auctions?feed=changes')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
for i in range(3):
auctions.append(self.create_auction())
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions?feed=changes')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
response = self.app.get('/auctions?feed=changes&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('prev_page', response.json)
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status']))
self.assertIn('opt_fields=status', response.json['next_page']['uri'])
response = self.app.get('/auctions?feed=changes', params=[('opt_fields', 'status,enquiryPeriod')])
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified', u'status', u'enquiryPeriod']))
self.assertIn('opt_fields=status%2CenquiryPeriod', response.json['next_page']['uri'])
response = self.app.get('/auctions?feed=changes&descending=1')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions], reverse=True))
response = self.app.get('/auctions?feed=changes&descending=1&limit=2')
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 2)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 1)
response = self.app.get(response.json['next_page']['path'].replace(ROUTE_PREFIX, ''))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('descending=1', response.json['prev_page']['uri'])
self.assertEqual(len(response.json['data']), 0)
test_auction_data2 = test_auction_data.copy()
test_auction_data2['mode'] = 'test'
self.create_auction(test_auction_data2)
while True:
response = self.app.get('/auctions?feed=changes&mode=test')
self.assertEqual(response.status, '200 OK')
if len(response.json['data']) == 1:
break
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/auctions?feed=changes&mode=_all_')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 4)
def test_listing_draft(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
auctions = []
data = test_auction_data.copy()
data.update({'status': 'draft'})
for i in range(3):
auctions.append(self.create_auction(data))
ids = ','.join([i['id'] for i in auctions])
while True:
response = self.app.get('/auctions')
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
if len(response.json['data']) == 3:
break
self.assertEqual(len(response.json['data']), 3)
self.assertEqual(set(response.json['data'][0]), set([u'id', u'dateModified']))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in auctions]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in auctions]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in auctions]))
def test_get_auction(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], auction)
response = self.app.get('/auctions/{}?opt_jsonp=callback'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}?opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_auction_not_found(self):
response = self.app.get('/auctions')
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/auctions/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'}
])
response = self.app.patch_json(
'/auctions/some_id', {'data': {}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'auction_id'}
])
# put custom document object into database to check auction construction on non-Auction data
data = {'contract': 'test', '_id': uuid4().hex}
self.db.save(data)
response = self.app.get('/auctions/{}'.format(data['_id']), status=404)
self.assertEqual(response.status, '404 Not Found')
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionAwardResourceTest(AuctionBaseWebTest):
def test_listing(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], auction['awards'])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_jsonp=callback'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards?opt_jsonp=callback&opt_pretty=1'.format(auction['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
def test_listing_changes(self):
auction = self.create_auction()
data = self.db[auction['id']]
awards = data['awards']
for i in range(3):
award = deepcopy(test_award)
award['date'] = get_now().isoformat()
award['id'] = uuid4().hex
awards.append(award)
self.db.save(data)
ids = ','.join([i['id'] for i in awards])
response = self.app.get('/auctions/{}/awards'.format(auction['id']))
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), len(awards))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in awards]))
self.assertEqual(set([i['date'] for i in response.json['data']]), set([i['date'] for i in awards]))
self.assertEqual([i['date'] for i in response.json['data']], sorted([i['date'] for i in awards]))
def test_get_award(self):
auction = self.create_auction()
award = auction['awards'][0]
response = self.app.get('/auctions/{}/awards/{}'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], award)
response = self.app.get('/auctions/{}/awards/{}?opt_jsonp=callback'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}/awards/{}?opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_award_not_found(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards/some_id'.format(auction['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'award_id'}
])
def test_get_document_with_versions(self):
auction = self.create_auction()
data = self.db[auction['id']]
documents = data['documents']
for i in range(3):
document = deepcopy(test_document)
document['id'] = data['documents'][0]['id']
document['url'] += str(i)
document['dateModified'] = get_now().isoformat()
documents.append(document)
self.db.save(data)
versions = [{'dateModified': i['dateModified'], 'url': i['url']} for i in documents[:-1]]
response = self.app.get('/auctions/{}/documents/{}'.format(auction['id'], document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']['previousVersions']), len(versions))
self.assertEqual(response.json['data']['previousVersions'], versions)
@unittest.skipUnless(auctions_core, "Auctions is not reachable")
class AuctionAwardDocumentResourceTest(AuctionBaseWebTest):
def test_listing(self):
auction = self.create_auction()
award = auction['awards'][0]
document = award['documents'][0]
response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], award['documents'])
self.assertNotIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertNotIn('{\n "', response.body)
self.assertIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "', response.body)
self.assertNotIn('callback({', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents?opt_jsonp=callback&opt_pretty=1'.format(auction['id'], award['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('{\n "', response.body)
self.assertIn('callback({', response.body)
def test_listing_changes(self):
auction = self.create_auction()
data = self.db[auction['id']]
award = data['awards'][0]
award_documents = award['documents']
for i in range(3):
document = deepcopy(test_document)
document['dateModified'] = get_now().isoformat()
document['id'] = uuid4().hex
award_documents.append(document)
self.db.save(data)
ids = ','.join([i['id'] for i in award_documents])
response = self.app.get('/auctions/{}/awards/{}/documents'.format(auction['id'], award['id']))
self.assertTrue(ids.startswith(','.join([i['id'] for i in response.json['data']])))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), len(award_documents))
self.assertEqual(set([i['id'] for i in response.json['data']]), set([i['id'] for i in award_documents]))
self.assertEqual(set([i['dateModified'] for i in response.json['data']]), set([i['dateModified'] for i in award_documents]))
self.assertEqual([i['dateModified'] for i in response.json['data']], sorted([i['dateModified'] for i in award_documents]))
def test_get_award_document(self):
auction = self.create_auction()
award = auction['awards'][0]
award_document = award['documents'][0]
response = self.app.get('/auctions/{}/awards/{}/documents/{}'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertDictEqual(response.json['data'], award_document)
response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_jsonp=callback'.format(auction['id'], award['id'],award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/javascript')
self.assertIn('callback({"data": {"', response.body)
response = self.app.get('/auctions/{}/awards/{}/documents/{}?opt_pretty=1'.format(auction['id'], award['id'], award_document['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertIn('{\n "data": {\n "', response.body)
def test_award_document_not_found(self):
auction = self.create_auction()
response = self.app.get('/auctions/{}/awards/{}/documents/some_id'.format(auction['id'], auction['awards'][0]['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'document_id'}
])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AuctionResourceTest))
suite.addTest(unittest.makeSuite(AuctionAwardResourceTest))
suite.addTest(unittest.makeSuite(AuctionAwardDocumentResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 2.328125 | 2 |
webium/controls/select.py | kejkz/webium | 152 | 7104 | from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.remote.webelement import WebElement
class Select(WebElement):
"""
Implements logic to work with Web List UI elements
"""
@property
def is_multiple(self):
value = self.get_attribute('multiple')
return value is not None and not value == 'false'
def select_option(self, option):
"""
Performs selection of provided item from Web List
        @params option - string value of the option to select
"""
items_list = self.get_options()
for item in items_list:
if item.get_attribute("value") == option:
item.click()
break
def get_options(self):
"""
        Returns all option elements of the Web List
"""
return self.find_elements_by_tag_name('option')
def get_attribute_selected(self, attribute):
"""
Performs search of selected item from Web List
Return attribute of selected item
@params attribute - string attribute name
"""
items_list = self.get_options()
return next(iter([item.get_attribute(attribute) for item in items_list if item.is_selected()]), None)
def get_value_selected(self):
"""
Performs search of selected item from Web List
Return value of selected item
"""
return self.get_attribute_selected('value')
def get_text_selected(self):
"""
Performs search of selected item from Web List
Return text of selected item
"""
return self.get_attribute_selected('text')
def select_by_visible_text(self, text):
"""
        Selects the option whose visible text matches the given string
@params text - string visible text
"""
xpath = './/option[normalize-space(.) = {0}]'.format(self._escape_string(text))
opts = self.find_elements_by_xpath(xpath)
matched = False
for opt in opts:
self._set_selected(opt)
if not self.is_multiple:
return
matched = True
# in case the target option isn't found by xpath
# attempt to find it by direct comparison among options which contain at least the longest token from the text
if len(opts) == 0 and ' ' in text:
sub_string_without_space = self._get_longest_token(text)
if sub_string_without_space == "":
candidates = self.get_options()
else:
xpath = ".//option[contains(.,{0})]".format(self._escape_string(sub_string_without_space))
candidates = self.find_elements_by_xpath(xpath)
for candidate in candidates:
if text == candidate.text:
self._set_selected(candidate)
if not self.is_multiple:
return
matched = True
if not matched:
raise NoSuchElementException("Could not locate element with visible text: " + str(text))
@staticmethod
def _escape_string(value):
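        # XPath 1.0 string literals cannot escape quotes, so a value containing both
        # quote types has to be assembled with concat(), e.g.
        #   a "b" 'c'  ->  concat("a ", '"', "b", '"', " 'c'")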
if '"' in value and "'" in value:
substrings = value.split('"')
result = ['concat(']
for substring in substrings:
result.append('"{0}"'.format(substring))
result.append(', \'"\', ')
result.pop()
if value.endswith('"'):
result.append(', \'"\'')
return ''.join(result) + ')'
if '"' in value:
return "'{0}'".format(value)
return '"{0}"'.format(value)
@staticmethod
def _get_longest_token(value):
items = value.split(' ')
longest = ''
for item in items:
if len(item) > len(longest):
longest = item
return longest
@staticmethod
def _set_selected(option):
if not option.is_selected():
option.click()
| 3.1875 | 3 |
mc/cookies/CookieManager.py | zy-sunshine/falkon-pyqt5 | 1 | 7105 | <filename>mc/cookies/CookieManager.py
from PyQt5.QtWidgets import QDialog
from PyQt5 import uic
from PyQt5.Qt import Qt
from PyQt5.Qt import QShortcut
from PyQt5.Qt import QKeySequence
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QInputDialog
from PyQt5.Qt import QDateTime
from PyQt5.Qt import QStyle
from PyQt5.Qt import QNetworkCookie
from PyQt5.QtWidgets import QTreeWidgetItem
from mc.common.globalvars import gVar
from mc.app.Settings import Settings
from mc.common import const
from mc.tools.TreeWidget import TreeWidget
from mc.tools.IconProvider import IconProvider
class HashableTreeWidgetItem(QTreeWidgetItem):
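    # Identity-based hash so these tree items can be used as dictionary keys
    # in CookieManager._itemHash below.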
def __hash__(self):
return id(self)
class CookieManager(QDialog):
def __init__(self, parent=None):
'''
@param parent QWidget
'''
super().__init__(parent)
self._ui = uic.loadUi('mc/cookies/CookieManager.ui', self)
self._domainHash = {} # QHash<QString, QTreeWidgetItem>
self._itemHash = {} # QHash<QTreeWidgetItem, QNetworkCookie>
self.setAttribute(Qt.WA_DeleteOnClose)
gVar.appTools.centerWidgetOnScreen(self)
if self.isRightToLeft():
self._ui.cookieTree.headerItem().setTextAlignment(0, Qt.AlignRight | Qt.AlignVCenter)
self._ui.cookieTree.headerItem().setTextAlignment(1, Qt.AlignRight | Qt.AlignVCenter)
self._ui.cookieTree.setLayoutDirection(Qt.LeftToRight)
self._ui.whiteList.setLayoutDirection(Qt.LeftToRight)
self._ui.blackList.setLayoutDirection(Qt.LeftToRight)
# Stored Cookies
self._ui.cookieTree.currentItemChanged.connect(self._currentItemChanged)
self._ui.removeAll.clicked.connect(self._removeAll)
self._ui.removeOne.clicked.connect(self._remove)
self._ui.close.clicked.connect(lambda: self._close())
self._ui.close2.clicked.connect(lambda: self._close())
self._ui.close3.clicked.connect(lambda: self._close())
self._ui.search.textChanged.connect(self._filterString)
# Cookie Filtering
self._ui.whiteAdd.clicked.connect(self._addWhitelist)
self._ui.whiteRemove.clicked.connect(self._removeWhitelist)
self._ui.blackAdd.clicked.connect(self._addBlacklist)
self._ui.blackRemove.clicked.connect(self._removeBlacklist)
# Cookie Settings
settings = Settings()
settings.beginGroup('Cookie-Settings')
        self._ui.saveCookies.setChecked(settings.value('allowCookies', True))
self._ui.filter3rdParty.setChecked(settings.value('filterThirdPartyCookies', False))
self._ui.filterTracking.setChecked(settings.value('filterTrackingCookie', False))
self._ui.deleteCookiesOnClose.setChecked(settings.value('deleteCookiesOnClose', False))
self._ui.whiteList.addItems(settings.value('whitelist', []))
self._ui.blackList.addItems(settings.value('blacklist', []))
settings.endGroup()
if const.QTWEBENGINEWIDGETS_VERSION < const.QT_VERSION_CHECK(5, 11, 0):
self._ui.filter3rdParty.hide()
self._ui.search.setPlaceholderText(_('Search'))
self._ui.cookieTree.setDefaultItemShowMode(TreeWidget.ItemsCollapsed)
self._ui.cookieTree.sortItems(0, Qt.AscendingOrder)
self._ui.cookieTree.header().setDefaultSectionSize(220)
self._ui.cookieTree.setFocus()
self._ui.whiteList.setSortingEnabled(True)
self._ui.blackList.setSortingEnabled(True)
self._removeShortcut = QShortcut(QKeySequence('Del'), self)
self._removeShortcut.activated.connect(self._deletePressed)
self._ui.search.textChanged.connect(self._filterString)
cookieJar = gVar.app.cookieJar()
cookieJar.cookieAdded.connect(self._addCookie)
cookieJar.cookieRemoved.connect(self._removeCookie)
# Load cookies
for cookie in cookieJar.getAllCookies():
self._addCookie(cookie)
gVar.appTools.setWmClass('Cookies', self)
def _close(self):
super().close()
# private Q_SLOTS:
def _currentItemChanged(self, current, parent):
'''
@param: current QTreeWidgetItem
@param: parent QTreeWidgetItem
'''
if not current:
return
if not current.text(1):
self._ui.name.setText(_('<cookie not selected>'))
self._ui.value.setText(_("<cookie not selected>"))
self._ui.server.setText(_("<cookie not selected>"))
self._ui.path.setText(_("<cookie not selected>"))
self._ui.secure.setText(_("<cookie not selected>"))
self._ui.expiration.setText(_("<cookie not selected>"))
self._ui.removeOne.setText(_("Remove cookies"))
return
cookie = current.data(0, Qt.UserRole + 10)
self._ui.name.setText(cookie.name().data().decode())
self._ui.value.setText(cookie.value().data().decode())
self._ui.server.setText(cookie.domain())
self._ui.path.setText(cookie.path())
if cookie.isSecure():
self._ui.secure.setText(_('Secure only'))
else:
self._ui.secure.setText(_('All connections'))
if cookie.isSessionCookie():
self._ui.expiration.setText(_('Session cookie'))
else:
self._ui.expiration.setText(
QDateTime(cookie.expirationDate()).toString('hh:mm:ss dddd d. MMMM yyyy')
)
self._ui.removeOne.setText(_('Remove cookie'))
def _remove(self):
current = self._ui.cookieTree.currentItem()
if not current:
return
cookies = [] # QList<QNetworkCookie>
if current.childCount():
for idx in range(current.childCount()):
# QTreeWidgetItem
item = current.child(idx)
if item and item in self._itemHash:
cookies.append(self._itemHash[item])
elif current in self._itemHash:
cookies.append(self._itemHash[current])
cookieJar = gVar.app.cookieJar()
for cookie in cookies:
cookieJar.deleteCookie(cookie)
def _removeAll(self):
button = QMessageBox.warning(self, _('Confirmation'),
_('Are you sure you want to delete all cookies on your computer?'),
QMessageBox.Yes | QMessageBox.No)
if button != QMessageBox.Yes:
return
gVar.app.cookieJar().deleteAllCookies()
self._itemHash.clear()
self._domainHash.clear()
self._ui.cookieTree.clear()
def _addWhitelist(self):
server, ok = QInputDialog.getText(self, _('Add to whitelist'),
_('Server:'))
if not server:
return
if self._ui.blackList.findItems(server, Qt.MatchFixedString):
QMessageBox.information(self, _('Already blacklisted!'),
_("The server \"%s\" is already in blacklist, please remove it first.") % server)
return
if not self._ui.whiteList.findItems(server, Qt.MatchFixedString):
self._ui.whiteList.addItem(server)
def _removeWhitelist(self):
item = self._ui.whiteList.currentItem()
self._removeTreeItem(self._ui.whiteList, item)
def _addBlacklist(self):
server, ok = QInputDialog.getText(self, _('Add to blacklist'),
_('Server:'))
self._addBlacklistByServer(server)
def _removeBlacklist(self):
item = self._ui.blackList.currentItem()
self._removeTreeItem(self._ui.blackList, item)
def _deletePressed(self):
if self._ui.cookieTree.hasFocus():
self._remove()
elif self._ui.whiteList.hasFocus():
self._removeWhitelist()
elif self._ui.blackList.hasFocus():
self._removeBlacklist()
def _filterString(self, string):
'''
@param: string QString
'''
print('=====>', string)
if not string:
for idx in range(self._ui.cookieTree.topLevelItemCount()):
item = self._ui.cookieTree.topLevelItem(idx)
item.setHidden(False)
item.setExpanded(self._ui.cookieTree.defaultItemShowMode() == TreeWidget.ItemsExpanded)
else:
strLower = string.lower()
for idx in range(self._ui.cookieTree.topLevelItemCount()):
item = self._ui.cookieTree.topLevelItem(idx)
text = '.' + item.text(0)
                item.setHidden(strLower not in text.lower())
item.setExpanded(True)
def _addCookie(self, cookie):
'''
@param: cookie QNetworkCookie
'''
item = None # QTreeWidgetItem
domain = self._cookieDomain(cookie)
findParent = self._domainHash.get(domain)
if findParent:
item = HashableTreeWidgetItem(findParent)
else:
newParent = HashableTreeWidgetItem(self._ui.cookieTree)
newParent.setText(0, domain)
newParent.setIcon(0, IconProvider.standardIcon(QStyle.SP_DirIcon))
newParent.setData(0, Qt.UserRole + 10, cookie.domain())
self._ui.cookieTree.addTopLevelItem(newParent)
self._domainHash[domain] = newParent
item = HashableTreeWidgetItem(newParent)
cookie = QNetworkCookie(cookie)
item.setText(0, '.' + domain)
item.setText(1, cookie.name().data().decode())
item.setData(0, Qt.UserRole + 10, cookie)
self._ui.cookieTree.addTopLevelItem(item)
self._itemHash[item] = cookie
def _removeCookie(self, cookie):
'''
@param: cookie QNetworkCookie
'''
# QTreeWidgetItem
item = self._cookieItem(cookie)
if not item:
return
self._itemHash.pop(item, None)
itemParent = item.parent()
if itemParent and itemParent.childCount() == 1:
self._domainHash.pop(self._cookieDomain(cookie), None)
self._removeTreeItem(self._ui.cookieTree, itemParent)
item = None
if item:
self._removeTreeItem(self._ui.cookieTree, item)
def _removeTreeItem(self, tree, item):
if not item: return
(item.parent() or tree.invisibleRootItem()).removeChild(item)
# private:
# override
def closeEvent(self, event):
'''
@param event QCloseEvent
'''
whitelist = []
blacklist = []
for idx in range(self._ui.whiteList.count()):
item = self._ui.whiteList.item(idx)
whitelist.append(item.text())
for idx in range(self._ui.blackList.count()):
item = self._ui.blackList.item(idx)
blacklist.append(item.text())
settings = Settings()
settings.beginGroup('Cookie-Settings')
settings.setValue('allowCookies', self._ui.saveCookies.isChecked())
settings.setValue('filterThirdPartyCookies', self._ui.filter3rdParty.isChecked())
settings.setValue('filterTrackingCookie', self._ui.filterTracking.isChecked())
settings.setValue('deleteCookiesOnClose', self._ui.deleteCookiesOnClose.isChecked())
settings.setValue('whitelist', whitelist)
settings.setValue('blacklist', blacklist)
settings.endGroup()
gVar.app.cookieJar().loadSettings()
event.accept()
# override
def keyPressEvent(self, event):
'''
@param event QKeyEvent
'''
if event.key() == Qt.Key_Escape:
self._close()
super().keyPressEvent(event)
def _addBlacklistByServer(self, server):
'''
@param: server QString
'''
if not server:
return
if self._ui.whiteList.findItems(server, Qt.MatchFixedString):
QMessageBox.information(self, _('Already whitelisted!'),
_("The server \"%s\" is already in whitelist, please remove it first.") % server)
return
if not self._ui.blackList.findItems(server, Qt.MatchFixedString):
self._ui.blackList.addItem(server)
def _cookieDomain(self, cookie):
'''
@param: cookie QNetworkCookie
@return: QString
'''
domain = cookie.domain()
domain = domain.lstrip('.')
return domain
def _cookieItem(self, cookie):
'''
@param: cookie QNetworkCookie
@return: QTreeWidgetItem
'''
for key, val in self._itemHash.items():
if val == cookie:
return key
return None
| 2.15625 | 2 |
.circleci/process_submitted_data.py | dongbohu/cimr-d | 0 | 7106 | #!/usr/bin/env python3
import os
import sys
import logging
import subprocess
logging.basicConfig(level=logging.INFO)
root_dir = 'submitted_data'
submitted_file_split = set()
for dir_, _, files in os.walk(root_dir):
for file_name in files:
rel_dir = os.path.relpath(dir_, root_dir)
rel_file = os.path.join(root_dir, rel_dir, file_name)
submitted_file_split.add(rel_file)
for submitted_file in submitted_file_split:
if submitted_file.startswith('submitted_data'):
dir_name, data_type, file_name = submitted_file.split('/')
out_dir_name = 'processed_data'
if not os.path.isdir(out_dir_name):
os.makedirs(out_dir_name, exist_ok=True)
if not os.path.isdir(out_dir_name + '/' + data_type):
os.makedirs(out_dir_name + '/' + data_type, exist_ok=True)
outfile = submitted_file.replace(dir_name, out_dir_name)
if not os.path.isfile(outfile):
if not data_type == 'tad':
from cimr.processor.utils import Infiler
infile = Infiler(
data_type,
submitted_file,
genome_build='b38',
update_rsid=False,
outfile=str(outfile),
chunksize=700000
)
infile.read_file()
if data_type == 'eqtl':
from cimr.processor.query import Querier
genes = list(infile.list_genes())
queried = Querier(genes)
queried.form_query()
else:
logging.info(f' processed file already exists for {submitted_file}')
logging.info(f' if reprocessing, delete {outfile} and file a new pull request')
| 2.171875 | 2 |
common/enums.py | resourceidea/resourceideaapi | 1 | 7107 | import enum
class Status(enum.Enum):
"""Status enumeration."""
ACTIVE = 'ACTIVE'
DISABLED = 'DISABLED'
ARCHIVED = 'ARCHIVED'
DELETED = 'DELETED'
class ProgressStatus(enum.Enum):
"""Enumeration indicates the different
stages of the progress made on an engagement,
job or task."""
NOT_STARTED = 'NOT STARTED'
RUNNING = 'RUNNING'
IN_REVIEW = 'IN REVIEW'
REVIEWED = 'REVIEWED'
CLOSED = 'CLOSED'
| 3.09375 | 3 |
networks/mobilenet.py | softsys4ai/FlexiBO | 8 | 7108 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MobileNet 224 (2017)
# Paper: https://arxiv.org/pdf/1704.04861.pdf
import os
import tensorflow as tf
from tensorflow.keras import layers, Input, Model
def stem(inputs, alpha, n_filters,
filter_size):
""" Construct the stem group
inputs : input tensor
alpha : width multiplier
"""
# Convolutional block
x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(inputs)
x = layers.Conv2D(n_filters, (filter_size, filter_size), strides=(2, 2), padding='valid')(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
# Depthwise Separable Convolution Block
x = depthwise_block(x, 64, alpha, (1, 1))
return x
def classifier(x, alpha, dropout, n_classes):
""" Construct the classifier group
x : input to the classifier
alpha : width multiplier
dropout : dropout percentage
n_classes : number of output classes
"""
# Flatten the feature maps into 1D feature maps (?, N)
x = layers.GlobalAveragePooling2D()(x)
# Reshape the feature maps to (?, 1, 1, 1024)
shape = (1, 1, int(1024 * alpha))
x = layers.Reshape(shape)(x)
# Perform dropout for preventing overfitting
x = layers.Dropout(dropout)(x)
# Use convolution for classifying (emulates a fully connected layer)
x = layers.Conv2D(n_classes, (1, 1), padding='same')(x)
x = layers.Activation('softmax')(x)
# Reshape the resulting output to 1D vector of number of classes
x = layers.Reshape((n_classes, ))(x)
return x
def depthwise_block(x, n_filters, alpha, strides):
""" Construct a Depthwise Separable Convolution block
x : input to the block
n_filters : number of filters
alpha : width multiplier
strides : strides
"""
# Apply the width filter to the number of feature maps
filters = int(n_filters * alpha)
# Strided convolution to match number of filters
if strides == (2, 2):
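        # pad only on the bottom/right so the stride-2 'valid' convolution below
        # reproduces TensorFlow's 'same' padding for even-sized feature maps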
x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(x)
padding = 'valid'
else:
padding = 'same'
# Depthwise Convolution
x = layers.DepthwiseConv2D((3, 3), strides, padding=padding)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
# Pointwise Convolution
x = layers.Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
return x
def get_configurable_hyperparams():
"""This function is used to ge the configurable hyperparameters
"""
import yaml
with open("cur_config.yaml") as fp:
        cur_cfg = yaml.safe_load(fp)
return (cur_cfg["cur_conf"][0], cur_cfg["cur_conf"][1], cur_cfg["cur_conf"][2],
cur_cfg["cur_conf"][3], cur_cfg["cur_conf"][4])
def get_data():
"""This function is used to get train and test data
"""
from tensorflow.keras.datasets import cifar10
import numpy as np
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = (x_train / 255.0).astype(np.float32)
x_test = (x_test / 255.0).astype(np.float32)
return x_train, y_train, x_test, y_test
if __name__=="__main__":
# get configurable hyperparams
    (stem_n_filters,
     stem_filter_size,
     depthwise_block1_n_filters,
     depthwise_block2_n_filters,
     depthwise_block3_n_filters,
     depthwise_block4_n_filters) = get_configurable_hyperparams()
alpha = 1 # width multiplier
dropout = 0.5 # dropout percentage
n_classes = 1000 # number of classes
inputs = Input(shape=(224, 224, 3))
# Create the stem group
x = stem(inputs, alpha, stem_n_filters,
stem_filter_size)
# First Depth wise Separable Convolution Group
# Strided convolution - feature map size reduction
x = depthwise_block(x, depthwise_block1_n_filters, alpha, strides=(2, 2))
x = depthwise_block(x, depthwise_block1_n_filters, alpha, strides=(1, 1))
# Second Depthwise Separable Convolution Group
# Strided convolution - feature map size reduction
x = depthwise_block(x, depthwise_block2_n_filters, alpha, strides=(2, 2))
x = depthwise_block(x, depthwise_block2_n_filters, alpha, strides=(1, 1))
# Third Depthwise Separable Convolution Group
# Strided convolution - feature map size reduction
x = depthwise_block(x, depthwise_block3_n_filters, alpha, strides=(2, 2))
for _ in range(5):
x = depthwise_block(x, depthwise_block3_n_filters, alpha, strides=(1, 1))
# Fourth Depthwise Separable Convolution Group
# Strided convolution - feature map size reduction
x = depthwise_block(x, depthwise_block4_n_filters, alpha, strides=(2, 2))
x = depthwise_block(x, depthwise_block4_n_filters, alpha, strides=(1, 1))
# Create the classifier
outputs = classifier(x, alpha, dropout, n_classes)
# Instantiate the Model
model = Model(inputs, outputs)
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()
    x_train, y_train, x_test, y_test = get_data()
# train model
model.fit(x_train, y_train, epochs=10,
batch_size=32, validation_split=0.1, verbose=1)
# save model
fmodel=os.path.join(os.getcwd(),"model.h5")
model.save(fmodel)
| 2.46875 | 2 |
info.py | altfool/mri_face_detection | 1 | 7109 | import numpy as np
img_dtype = np.float32
imgX, imgY, imgZ = (256, 256, 150)
imgs_path_withfaces = '../dataset/withfaces'
imgs_path_nofaces = '../dataset/nofaces'
imgX_dwt1, imgY_dwt1, imgZ_dwt1 = (128, 128, 75)
imgs_path_withfaces_dwt = './dataset/withfaces'
imgs_path_nofaces_dwt = './dataset/nofaces'
dwt_flag = (True, False)[0]
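# index 0 selects True: use the level-1 DWT (half-resolution) image dimensions and paths defined above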
if dwt_flag:
imgX, imgY, imgZ = imgX_dwt1, imgY_dwt1, imgZ_dwt1
imgs_path_withfaces = imgs_path_withfaces_dwt
imgs_path_nofaces = imgs_path_nofaces_dwt
| 2.015625 | 2 |
biggan/paddorch/paddorch/vision/functional.py | zzz2010/Contrib | 20 | 7110 | <filename>biggan/paddorch/paddorch/vision/functional.py<gh_stars>10-100
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import collections
import random
import math
import cv2
import numbers
import numpy as np
if sys.version_info < (3, 3):
Sequence = collections.Sequence
Iterable = collections.Iterable
else:
Sequence = collections.abc.Sequence
Iterable = collections.abc.Iterable
__all__ = ['flip', 'resize', 'pad', 'rotate', 'to_grayscale']
def flip(image, code):
"""
Accordding to the code (the type of flip), flip the input image
Args:
image: Input image, with (H, W, C) shape
code: Code that indicates the type of flip.
-1 : Flip horizontally and vertically
0 : Flip vertically
1 : Flip horizontally
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms import functional as F
fake_img = np.random.rand(224, 224, 3)
# flip horizontally and vertically
F.flip(fake_img, -1)
# flip vertically
F.flip(fake_img, 0)
# flip horizontally
F.flip(fake_img, 1)
"""
return cv2.flip(image, flipCode=code)
def resize(img, size, interpolation=cv2.INTER_LINEAR):
"""
resize the input data to given size
Args:
input: Input data, could be image or masks, with (H, W, C) shape
size: Target size of input data, with (height, width) shape.
interpolation: Interpolation method.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms import functional as F
fake_img = np.random.rand(256, 256, 3)
F.resize(fake_img, 224)
F.resize(fake_img, (200, 150))
"""
if isinstance(interpolation, Sequence):
interpolation = random.choice(interpolation)
if isinstance(size, int):
h, w = img.shape[:2]
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return cv2.resize(img, (ow, oh), interpolation=interpolation)
else:
oh = size
ow = int(size * w / h)
return cv2.resize(img, (ow, oh), interpolation=interpolation)
else:
return cv2.resize(img, tuple(size[::-1]), interpolation=interpolation)
def pad(img, padding, fill=(0, 0, 0), padding_mode='constant'):
"""Pads the given CV Image on all sides with speficified padding mode and fill value.
Args:
img (np.ndarray): Image to be padded.
padding (int|tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (int|tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
``constant`` means padding with a constant value, this value is specified with fill.
``edge`` means padding with the last value at the edge of the image.
``reflect`` means padding with reflection of image (without repeating the last value on the edge)
padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode
will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``.
            ``symmetric`` means padding with reflection of image (repeating the last value on the edge)
padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode
will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``.
Returns:
numpy ndarray: Padded image.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import pad
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = pad(fake_img, 2)
print(fake_img.shape)
"""
if not isinstance(padding, (numbers.Number, list, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, list, tuple)):
raise TypeError('Got inappropriate fill arg')
if not isinstance(padding_mode, str):
raise TypeError('Got inappropriate padding_mode arg')
if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:
raise ValueError(
"Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
'Expected padding mode be either constant, edge, reflect or symmetric, but got {}'.format(padding_mode)
PAD_MOD = {
'constant': cv2.BORDER_CONSTANT,
'edge': cv2.BORDER_REPLICATE,
'reflect': cv2.BORDER_DEFAULT,
'symmetric': cv2.BORDER_REFLECT
}
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, collections.Sequence) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, collections.Sequence) and len(padding) == 4:
pad_left, pad_top, pad_right, pad_bottom = padding
if isinstance(fill, numbers.Number):
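        # expand a scalar fill to one value per channel: a 3-channel image (ndim 3)
        # gets a 3-tuple, a single-channel image (ndim 2) gets a 1-tuple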
fill = (fill,) * (2 * len(img.shape) - 3)
if padding_mode == 'constant':
assert (len(fill) == 3 and len(img.shape) == 3) or (len(fill) == 1 and len(img.shape) == 2), \
'channel of image is {} but length of fill is {}'.format(img.shape[-1], len(fill))
img = cv2.copyMakeBorder(
src=img,
top=pad_top,
bottom=pad_bottom,
left=pad_left,
right=pad_right,
borderType=PAD_MOD[padding_mode],
value=fill)
return img
def rotate(img,
angle,
interpolation=cv2.INTER_LINEAR,
expand=False,
center=None):
"""Rotates the image by angle.
Args:
img (numpy.ndarray): Image to be rotated.
angle (float|int): In degrees clockwise order.
interpolation (int, optional):
interpolation: Interpolation method.
expand (bool|optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple|optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
Returns:
numpy ndarray: Rotated image.
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import rotate
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = rotate(fake_img, 10)
print(fake_img.shape)
"""
dtype = img.dtype
h, w, _ = img.shape
point = center or (w / 2, h / 2)
M = cv2.getRotationMatrix2D(point, angle=-angle, scale=1)
if expand:
if center is None:
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
M[0, 2] += (nW / 2) - point[0]
M[1, 2] += (nH / 2) - point[1]
dst = cv2.warpAffine(img, M, (nW, nH))
else:
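            # an explicit rotation center was given: transform the four corners
            # with M to find the bounding box that contains the rotated image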
xx = []
yy = []
for point in (np.array([0, 0, 1]), np.array([w - 1, 0, 1]),
np.array([w - 1, h - 1, 1]), np.array([0, h - 1, 1])):
target = np.dot(M, point)
xx.append(target[0])
yy.append(target[1])
nh = int(math.ceil(max(yy)) - math.floor(min(yy)))
nw = int(math.ceil(max(xx)) - math.floor(min(xx)))
M[0, 2] += (nw - w) / 2
M[1, 2] += (nh - h) / 2
dst = cv2.warpAffine(img, M, (nw, nh), flags=interpolation)
else:
dst = cv2.warpAffine(img, M, (w, h), flags=interpolation)
return dst.astype(dtype)
def to_grayscale(img, num_output_channels=1):
"""Converts image to grayscale version of image.
Args:
img (numpy.ndarray): Image to be converted to grayscale.
Returns:
numpy.ndarray: Grayscale version of the image.
if num_output_channels == 1, returned image is single channel
if num_output_channels == 3, returned image is 3 channel with r == g == b
Examples:
.. code-block:: python
import numpy as np
from paddle.incubate.hapi.vision.transforms.functional import to_grayscale
fake_img = np.random.rand(500, 500, 3).astype('float32')
fake_img = to_grayscale(fake_img)
print(fake_img.shape)
"""
if num_output_channels == 1:
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
elif num_output_channels == 3:
img = cv2.cvtColor(
cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
else:
raise ValueError('num_output_channels should be either 1 or 3')
return img | 2.875 | 3 |
ground_battle.py | ashhansen6/minigames | 0 | 7111 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 13:38:35 2021
GROUND INVASION! The Game
@author: <NAME> (<EMAIL>)
"""
# Packages used:
import numpy as np
import pandas as pd
import random as rng
from termcolor import colored
# Defining starting forces
## Defenders:
def_force = 1250
def_reserves = 400
defenders = def_force + def_reserves
def_strength = def_force
def_guard = def_force
## Attackers:
att_force = 900
att_reserves = 1000
attackers = att_force + att_reserves
att_strength = att_force
att_guard = att_force
# Defining strategies:
## Defenders:
def_strat = ["draft", "turtle"]
### Draft
def draft(def_force, def_reserves):
global def_pair
global def_strength
global def_guard
# Defender Strategy Information
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("You hear news that a draft decree was issued...")
print("Intelligence suggests that there will be more enemy combatants.")
print("You expect the drafted soldiers to have decreased combat effectiveness.")
# Defender Strategy Effects
if def_reserves >= 100:
def_danger = def_force + 100
def_safe = def_reserves - 100
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
else:
def_danger = def_force + def_reserves
def_safe = 0
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
def_power = def_danger * 0.980
def_protection = def_danger * 0.95
def_deployment = [def_danger, def_safe, def_power, def_protection]
return(def_deployment)
### Turtle
def turtle(def_force, def_reserves):
global def_pair
global def_strength
global def_guard
# Defender Strategy Information
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("The defenders appear to bolster their defenses in preparation.")
print("Intelligence suggests that their defenses will be difficult to penetrate.")
print("It is likely that the defenders will try to keep soldiers out of harm's way.")
# Defender Strategy Effects
if def_force > 1100:
def_danger = def_force
def_safe = def_reserves + (def_danger - 1100)
def_danger = 1100
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
else:
def_danger = def_force
def_safe = def_reserves
print("Defender's fielded forces:", def_danger)
print("Defender's forces still in reserve:", def_safe)
def_power = def_danger * 0.975
def_protection = def_danger * 1.15
def_deployment = [def_danger, def_safe, def_power, def_protection]
return(def_deployment)
## Attackers:
att_strat = ["blitz", "guerilla"]
### Blitz
def blitz(att_force, att_reserves):
global att_pair
global att_strength
global att_guard
# Attacker Strategy Information
print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan"))
print("Your officers grimly accept your orders...")
print("There is an air of apprehension as the troops prepare to deploy.")
print("While offensive effectiveness will improve, heavier losses are expected.")
# Attacker Strategy Effects
if att_reserves >= 200:
att_danger = att_force + 200
att_safe = att_reserves - 200
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_safe)
else:
att_danger = att_force + att_reserves
att_safe = 0
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_reserves)
att_power = att_danger * 1.10
att_protection = att_danger * 0.90
att_deployment = [att_danger, att_safe, att_power, att_protection]
return(att_deployment)
### Guerilla
def guerilla(att_force, att_reserves):
global att_pair
global att_strength
global att_guard
# Attacker Strategy Information
print(colored("########## OFFICERS' REPORTS #########", on_color = "on_cyan"))
print("Your officers immediately begin plans to target strategic weak points.")
print("Soldiers move out in small forces and keep the enemy guessing.")
print("While not as effective offensively, troop survival rates should be higher.")
# Attacker Strategy Effects
if att_force > 750:
att_danger = att_force
att_safe = att_reserves + (att_force - 750)
att_danger = 750
else:
att_danger = att_force
att_safe = att_reserves
print("Attacker's fielded forces:", att_danger)
print("Attacker's forces still in reserve:", att_safe)
att_power = att_danger * 0.95
att_protection = att_danger * 1.25
att_deployment = [att_danger, att_safe, att_power, att_protection]
return(att_deployment)
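# Illustrative note (not part of the original game logic): every strategy function
# returns its deployment as [fielded, reserve, strength, guard], which the battle
# loop below unpacks by index. For example, blitz(900, 1000) fields 1100 troops,
# keeps 800 in reserve, and yields strength 1100 * 1.10 and guard 1100 * 0.90.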
# Ground Battle Event (Player == Attacker)
wave = 0
player = input("Attacker or Defender? [A/D]:")
while (attackers > 0) and (defenders > 0):
# Wave Information
wave = wave + 1
if wave == 1:
print("############################################################")
print("PREPARE FOR BATTLE! THE FIRST WAVE OF THE BATTLE BEGINS NOW.")
print("############################################################")
else:
print("########## WAVE:", wave, "##########")
print("#############################")
print("Defending force strength:", def_force)
print("Defending forces in reserve:", def_reserves)
print("Attacking force strength:", att_force)
print("Attacking forces in reserve:", att_reserves)
if player =="A":
# Active Player (Attacker)
att_strat_chosen = input(colored("How should we proceed, commander? [blitz/guerilla]:", "yellow"))
elif player == "D":
# CPU Attacker
att_strat_chosen = rng.choice(att_strat)
# Defender Setup
if player == "A":
# CPU Defender
if def_reserves > 0:
def_strat = ["none",
"draft", "draft", "draft", "draft", "draft", "draft",
"turtle", "turtle", "turtle"]
def_strat_chosen = rng.choice(def_strat)
else:
def_strat = ["none", "none",
"turtle", "turtle", "turtle" ,"turtle", "turtle", "turtle", "turtle", "turtle"]
def_strat_chosen = rng.choice(def_strat)
elif player == "D":
# Active Player (defender)
def_strat_chosen = input(colored("How should we proceed, commander? [draft/turtle]:", "yellow"))
if def_strat_chosen == "draft":
draft_results = draft(def_force, def_reserves)
def_force = draft_results[0]
def_reserves = draft_results[1]
def_strength = draft_results[2]
def_guard = draft_results[3]
elif def_strat_chosen == "turtle":
turtle_results = turtle(def_force, def_reserves)
def_force = turtle_results[0]
def_reserves = turtle_results[1]
def_strength = turtle_results[2]
def_guard = turtle_results[3]
elif def_strat_chosen == "none":
print(colored("########## INTELLIGENCE REPORT ##########", on_color = "on_cyan"))
print("It appears that the enemy will employ standard tactics...")
def_force = def_force
def_reserves = def_reserves
def_strength = def_force
def_guard = def_force
print("Defending force strength:", def_force)
print("Forces kept in reserve:", def_reserves)
# Attacker Setup
if att_strat_chosen == "blitz":
blitz_results = blitz(att_force, att_reserves)
att_force = blitz_results[0]
att_reserves = blitz_results[1]
att_strength = blitz_results[2]
att_guard = blitz_results[3]
elif att_strat_chosen == "guerilla":
guerilla_results = guerilla(att_force, att_reserves)
att_force = guerilla_results[0]
att_reserves = guerilla_results[1]
att_strength = guerilla_results[2]
att_guard = guerilla_results[3]
# Combat
# Attacker damage
def_guard = np.random.normal(def_guard, def_guard/10) * 0.50
att_strength = att_strength - def_guard
if att_strength < 0:
att_strength = 0
def_force = def_force - np.random.normal(att_strength, att_strength/10)//2 - (0.1*att_strength)//1
if def_force < 0:
def_force = 0
# Defender damage
att_guard = np.random.normal(att_guard, att_guard/10) * 0.50 - 0.1
def_strength = def_strength - att_guard
if def_strength < 0:
def_strength = 0
att_force = att_force - np.random.normal(def_strength, def_strength/10)//2 - (0.1*def_strength)//1
if att_force < 0:
att_force = 0
# Post-wave results:
print(colored("########## POST-WAVE RESULTS ##########", on_color = "on_cyan"))
print(colored("Defenders:", on_color = "on_blue"))
print("Surviving defensive forces:", def_force)
print("Defenseive forces kept in reserve:", def_reserves)
print("Defender strength estimate:", def_strength)
print("Defender guard estimate:", def_guard)
print(colored("Attackers:", on_color = "on_red"))
print("Surviving attacker forces:", att_force)
print("Attacker forces kept in reserve:", att_reserves)
print("Attacker strength estimate:", att_strength)
print("Attacker guard estimate:", att_guard)
# Reset allocations
# Defender reallocations:
def_reserves = def_reserves + def_force
def_force = 0
if def_reserves >= 1250:
def_reserves = def_reserves - 1250
def_force = 1250
def_guard = def_force
else:
def_force = def_reserves
def_reserves = 0
def_guard = def_force
# Attacker reallocations:
att_reserves = att_reserves + att_force
att_force = 0
if att_reserves >= 900:
att_reserves = att_reserves - 900
att_force = 900
att_guard = att_force
else:
att_force = att_reserves
att_reserves = 0
att_guard = att_force
defenders = def_force + def_reserves
attackers = att_force + att_reserves
# End of wave conditionals
if (attackers > 0) and (defenders > 0) and (player == "A"):
fightflight = input(colored("Continue or retreat?: [continue/retreat]:", "yellow"))
if fightflight == "retreat":
print(colored("########## WITHDRAWAL ##########", on_color = "on_blue"))
print("You choose to withdraw your troops...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
break
else:
print("The battle will continue next turn...")
elif attackers <= 0 and player == "A":
print(colored("########## FAILURE! ##########", on_color = "on_red"))
print("Your assault has been repelled!")
print("You return home, wondering what punishment for your failure awaits...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
elif defenders <= 0 and player == "A":
print(colored("########## SUCCESS! ##########", on_color = "on_green"))
print("The defenders have been routed!")
print("You may now decide the fate of the defending population...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", attackers)
print("Total losses:", (1900 - attackers))
print("Survival rate:", (attackers)/1900)
print("Total assault waves:", wave)
elif (attackers > 0) and (defenders > 0) and (player == "D"):
fightflight = input(colored("Defend or retreat?: [defend/retreat]:", "yellow"))
if fightflight == "retreat":
print(colored("########## WITHDRAWAL ##########", on_color = "on_blue"))
print("You choose to withdraw your troops from the region...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1900 - defenders))
print("Survival rate:", (defenders)/1900)
print("Total assault waves:", wave)
break
else:
print("The battle will continue next turn...")
elif defenders <= 0 and player == "D":
print(colored("########## FAILURE! ##########", on_color = "on_red"))
print("Your defense has been broken!")
print("Enemy troops now occupy your lands and have claimed dominion...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1650 - defenders))
print("Survival rate:", (defenders)/1650)
print("Total assault waves:", wave)
elif attackers <= 0 and player == "D":
print(colored("########## SUCCESS! ##########", on_color = "on_green"))
print("The attackers have been repelled!")
print("The storm has passed, and your people live another day...")
print(colored("######### INVASION STATISTICS ##########", on_color = "on_cyan"))
print("Troops remaining:", defenders)
print("Total losses:", (1650 - defenders))
print("Survival rate:", (defenders)/1650)
print("Total assault waves:", wave)
print("#############################")
| 3.375 | 3 |
src/pretalx/orga/urls.py | martinheidegger/pretalx | 0 | 7112 | <filename>src/pretalx/orga/urls.py
from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from pretalx.event.models.event import SLUG_CHARS
from pretalx.orga.views import cards
from .views import (
admin,
auth,
cfp,
dashboard,
event,
mails,
organiser,
person,
plugins,
review,
schedule,
speaker,
submission,
)
app_name = "orga"
urlpatterns = [
url("^login/$", auth.LoginView.as_view(), name="login"),
url("^logout/$", auth.logout_view, name="logout"),
url("^reset/$", auth.ResetView.as_view(), name="auth.reset"),
url(r"^reset/(?P<token>\w+)$", auth.RecoverView.as_view(), name="auth.recover"),
url("^$", RedirectView.as_view(url="event", permanent=False)),
url("^admin/$", admin.AdminDashboard.as_view(), name="admin.dashboard"),
url("^admin/update/$", admin.UpdateCheckView.as_view(), name="admin.update"),
url("^me$", event.UserSettings.as_view(), name="user.view"),
url("^me/subuser$", person.SubuserView.as_view(), name="user.subuser"),
url(
r"^invitation/(?P<code>\w+)$",
event.InvitationView.as_view(),
name="invitation.view",
),
url(
"^organiser/$",
dashboard.DashboardOrganiserListView.as_view(),
name="organiser.list",
),
url(
"^organiser/new$", organiser.OrganiserDetail.as_view(), name="organiser.create"
),
url(
f"^organiser/(?P<organiser>[{SLUG_CHARS}]+)/",
include(
[
url("^$", organiser.OrganiserDetail.as_view(), name="organiser.view"),
url(
"^delete$",
organiser.OrganiserDelete.as_view(),
name="organiser.delete",
),
url("^teams/$", organiser.TeamDetail.as_view(), name="organiser.teams"),
url(
"^teams/new$",
organiser.TeamDetail.as_view(),
name="organiser.teams.create",
),
url(
"^teams/(?P<pk>[0-9]+)/$",
organiser.TeamDetail.as_view(),
name="organiser.teams.view",
),
url(
"^teams/(?P<pk>[0-9]+)/delete$",
organiser.TeamDelete.as_view(),
name="organiser.teams.delete",
),
url(
"^teams/(?P<pk>[0-9]+)/tracks$",
organiser.TeamTracks.as_view(),
name="organiser.teams.tracks",
),
url(
"^teams/(?P<pk>[0-9]+)/delete/(?P<user_pk>[0-9]+)$",
organiser.TeamDelete.as_view(),
name="organiser.teams.delete_member",
),
url(
"^teams/(?P<pk>[0-9]+)/reset/(?P<user_pk>[0-9]+)$",
organiser.TeamResetPassword.as_view(),
name="organiser.team.password_reset",
),
url(
"^teams/(?P<pk>[0-9]+)/uninvite$",
organiser.TeamUninvite.as_view(),
name="organiser.teams.uninvite",
),
url(
"^teams/(?P<pk>[0-9]+)/resend$",
organiser.TeamResend.as_view(),
name="organiser.teams.resend",
),
]
),
),
url("^event/new/$", event.EventWizard.as_view(), name="event.create"),
url("^event/typeahead/$", event.event_list, name="event.typeahead"),
url("^event/$", dashboard.DashboardEventListView.as_view(), name="event.list"),
url(
f"^event/(?P<event>[{SLUG_CHARS}]+)/",
include(
[
url(
"^$", dashboard.EventDashboardView.as_view(), name="event.dashboard"
),
url("^login/$", auth.LoginView.as_view(), name="event.login"),
url("^reset/$", auth.ResetView.as_view(), name="event.auth.reset"),
url(
r"^reset/(?P<token>\w+)$",
auth.RecoverView.as_view(),
name="event.auth.recover",
),
url("^delete$", event.EventDelete.as_view(), name="event.delete"),
url("^live$", event.EventLive.as_view(), name="event.live"),
url("^api/users$", person.UserList.as_view(), name="event.user_list"),
url(
"^cfp/$",
RedirectView.as_view(pattern_name="orga:cfp.text.view"),
name="cfp",
),
url("^cfp/flow/$", cfp.CfPFlowEditor.as_view(), name="cfp.flow"),
url(
"^cfp/questions/$",
cfp.CfPQuestionList.as_view(),
name="cfp.questions.view",
),
url(
"^cfp/questions/new$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.questions.create",
),
url(
"^cfp/questions/remind$",
cfp.CfPQuestionRemind.as_view(),
name="cfp.questions.remind",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.question.view",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/up$",
cfp.question_move_up,
name="cfp.questions.up",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/down$",
cfp.question_move_down,
name="cfp.questions.down",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/delete$",
cfp.CfPQuestionDelete.as_view(),
name="cfp.question.delete",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/edit$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.question.edit",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/toggle$",
cfp.CfPQuestionToggle.as_view(),
name="cfp.question.toggle",
),
url("^cfp/text$", cfp.CfPTextDetail.as_view(), name="cfp.text.view"),
url(
"^cfp/types/$",
cfp.SubmissionTypeList.as_view(),
name="cfp.types.view",
),
url(
"^cfp/types/new$",
cfp.SubmissionTypeDetail.as_view(),
name="cfp.types.create",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/$",
cfp.SubmissionTypeDetail.as_view(),
name="cfp.type.view",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/delete$",
cfp.SubmissionTypeDelete.as_view(),
name="cfp.type.delete",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/default$",
cfp.SubmissionTypeDefault.as_view(),
name="cfp.type.default",
),
url("^cfp/tracks/$", cfp.TrackList.as_view(), name="cfp.tracks.view"),
url(
"^cfp/tracks/new$",
cfp.TrackDetail.as_view(),
name="cfp.track.create",
),
url(
"^cfp/tracks/(?P<pk>[0-9]+)/$",
cfp.TrackDetail.as_view(),
name="cfp.track.view",
),
url(
"^cfp/tracks/(?P<pk>[0-9]+)/delete$",
cfp.TrackDelete.as_view(),
name="cfp.track.delete",
),
url(
"^cfp/access-codes/$",
cfp.AccessCodeList.as_view(),
name="cfp.access_code.view",
),
url(
"^cfp/access-codes/new$",
cfp.AccessCodeDetail.as_view(),
name="cfp.access_code.create",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/$",
cfp.AccessCodeDetail.as_view(),
name="cfp.access_code.view",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/send$",
cfp.AccessCodeSend.as_view(),
name="cfp.access_code.send",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/delete$",
cfp.AccessCodeDelete.as_view(),
name="cfp.access_code.delete",
),
url(
"^mails/",
include(
[
url(
"^(?P<pk>[0-9]+)/$",
mails.MailDetail.as_view(),
name="mails.outbox.mail.view",
),
url(
"^(?P<pk>[0-9]+)/copy$",
mails.MailCopy.as_view(),
name="mails.outbox.mail.copy",
),
url(
"^(?P<pk>[0-9]+)/delete$",
mails.OutboxPurge.as_view(),
name="mails.outbox.mail.delete",
),
url(
"^(?P<pk>[0-9]+)/send$",
mails.OutboxSend.as_view(),
name="mails.outbox.mail.send",
),
url(
"^templates/$",
mails.TemplateList.as_view(),
name="mails.templates.list",
),
url(
"^templates/new$",
mails.TemplateDetail.as_view(),
name="mails.templates.create",
),
url(
"^templates/(?P<pk>[0-9]+)/$",
mails.TemplateDetail.as_view(),
name="mails.templates.view",
),
url(
"^templates/(?P<pk>[0-9]+)/delete$",
mails.TemplateDelete.as_view(),
name="mails.templates.delete",
),
url(
"^compose$",
mails.ComposeMail.as_view(),
name="mails.compose",
),
url("^sent$", mails.SentMail.as_view(), name="mails.sent"),
url(
"^outbox/$",
mails.OutboxList.as_view(),
name="mails.outbox.list",
),
url(
"^outbox/send$",
mails.OutboxSend.as_view(),
name="mails.outbox.send",
),
url(
"^outbox/purge$",
mails.OutboxPurge.as_view(),
name="mails.outbox.purge",
),
]
),
),
url(
"^submissions/$",
submission.SubmissionList.as_view(),
name="submissions.list",
),
url(
"^submissions/new$",
submission.SubmissionContent.as_view(),
name="submissions.create",
),
url(
"^submissions/cards/$",
cards.SubmissionCards.as_view(),
name="submissions.cards",
),
url(
"^submissions/feed/$",
submission.SubmissionFeed(),
name="submissions.feed",
),
url(
"^submissions/statistics/$",
submission.SubmissionStats.as_view(),
name="submissions.statistics",
),
url(
"^submissions/feedback/$",
submission.AllFeedbacksList.as_view(),
name="submissions.feedback",
),
url(
r"^submissions/(?P<code>[\w-]+)/",
include(
[
url(
"^$",
submission.SubmissionContent.as_view(),
name="submissions.content.view",
),
url(
"^submit$",
submission.SubmissionStateChange.as_view(),
name="submissions.submit",
),
url(
"^accept$",
submission.SubmissionStateChange.as_view(),
name="submissions.accept",
),
url(
"^reject$",
submission.SubmissionStateChange.as_view(),
name="submissions.reject",
),
url(
"^confirm",
submission.SubmissionStateChange.as_view(),
name="submissions.confirm",
),
url(
"^withdraw$",
submission.SubmissionStateChange.as_view(),
name="submissions.withdraw",
),
url(
"^delete",
submission.SubmissionStateChange.as_view(),
name="submissions.delete",
),
url(
"^cancel",
submission.SubmissionStateChange.as_view(),
name="submissions.cancel",
),
url(
"^speakers/$",
submission.SubmissionSpeakers.as_view(),
name="submissions.speakers.view",
),
url(
"^speakers/add$",
submission.SubmissionSpeakersAdd.as_view(),
name="submissions.speakers.add",
),
url(
"^speakers/delete$",
submission.SubmissionSpeakersDelete.as_view(),
name="submissions.speakers.delete",
),
url(
"^reviews/$",
review.ReviewSubmission.as_view(),
name="submissions.reviews",
),
url(
"^reviews/delete$",
review.ReviewSubmissionDelete.as_view(),
name="submissions.reviews.submission.delete",
),
url(
"^feedback/$",
submission.FeedbackList.as_view(),
name="submissions.feedback.list",
),
url(
"^toggle_featured$",
submission.ToggleFeatured.as_view(),
name="submissions.toggle_featured",
),
url(
"^anonymise/$",
submission.Anonymise.as_view(),
name="submissions.anonymise",
),
]
),
),
url("^speakers/$", speaker.SpeakerList.as_view(), name="speakers.list"),
url(
"^speakers/(?P<pk>[0-9]+)/$",
speaker.SpeakerDetail.as_view(),
name="speakers.view",
),
url(
"^speakers/(?P<pk>[0-9]+)/reset$",
speaker.SpeakerPasswordReset.as_view(),
name="speakers.reset",
),
url(
"^speakers/(?P<pk>[0-9]+)/toggle-arrived$",
speaker.SpeakerToggleArrived.as_view(),
name="speakers.arrived",
),
url(
"^info/$",
speaker.InformationList.as_view(),
name="speakers.information.list",
),
url(
"^info/new$",
speaker.InformationDetail.as_view(),
name="speakers.information.create",
),
url(
"^info/(?P<pk>[0-9]+)/$",
speaker.InformationDetail.as_view(),
name="speakers.information.view",
),
url(
"^info/(?P<pk>[0-9]+)/delete$",
speaker.InformationDelete.as_view(),
name="speakers.information.delete",
),
url(
"^reviews/$",
review.ReviewDashboard.as_view(),
name="reviews.dashboard",
),
url(
"^reviews/regenerate/$",
review.RegenerateDecisionMails.as_view(),
name="reviews.regenerate",
),
url(
"^settings/$",
event.EventDetail.as_view(),
name="settings.event.view",
),
url(
"^settings/mail$",
event.EventMailSettings.as_view(),
name="settings.mail.view",
),
url(
"^settings/plugins$",
plugins.EventPluginsView.as_view(),
name="settings.plugins.select",
),
url(
"^settings/widget$",
event.WidgetSettings.as_view(),
name="settings.widget",
),
url(
"^settings/review/$",
event.EventReviewSettings.as_view(),
name="settings.review",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/up$",
event.phase_move_up,
name="settings.review.phase.up",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/down$",
event.phase_move_down,
name="settings.review.phase.down",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/delete$",
event.PhaseDelete.as_view(),
name="settings.review.phasedelete",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/activate$",
event.PhaseActivate.as_view(),
name="settings.review.phasedelete",
),
url(
"^schedule/$", schedule.ScheduleView.as_view(), name="schedule.main"
),
url(
"^schedule/export/$",
schedule.ScheduleExportView.as_view(),
name="schedule.export",
),
url(
"^schedule/export/trigger$",
schedule.ScheduleExportTriggerView.as_view(),
name="schedule.export.trigger",
),
url(
"^schedule/export/download$",
schedule.ScheduleExportDownloadView.as_view(),
name="schedule.export.download",
),
url(
"^schedule/release$",
schedule.ScheduleReleaseView.as_view(),
name="schedule.release",
),
url(
r"^schedule/quick/(?P<code>\w+)/$",
schedule.QuickScheduleView.as_view(),
name="schedule.quick",
),
url(
"^schedule/reset$",
schedule.ScheduleResetView.as_view(),
name="schedule.reset",
),
url(
"^schedule/toggle$",
schedule.ScheduleToggleView.as_view(),
name="schedule.toggle",
),
url(
"^schedule/resend_mails$",
schedule.ScheduleResendMailsView.as_view(),
name="schedule.resend_mails",
),
url(
"^schedule/rooms/$",
schedule.RoomList.as_view(),
name="schedule.rooms.list",
),
url(
"^schedule/rooms/new$",
schedule.RoomDetail.as_view(),
name="schedule.rooms.create",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/$",
schedule.RoomDetail.as_view(),
name="schedule.rooms.view",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/delete$",
schedule.RoomDelete.as_view(),
name="schedule.rooms.delete",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/up$",
schedule.room_move_up,
name="schedule.rooms.up",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/down$",
schedule.room_move_down,
name="schedule.rooms.down",
),
url(
"^schedule/api/talks/$",
schedule.TalkList.as_view(),
name="schedule.api.talks",
),
url(
"^schedule/api/talks/(?P<pk>[0-9]+)/$",
schedule.TalkUpdate.as_view(),
name="schedule.api.update",
),
url(
"^schedule/api/availabilities/(?P<talkid>[0-9]+)/(?P<roomid>[0-9]+)/$",
schedule.RoomTalkAvailabilities.as_view(),
name="schedule.api.availabilities",
),
]
),
),
]
| 1.84375 | 2 |
ws2122-lspm/Lib/site-packages/pm4py/statistics/overlap/utils/compute.py | Malekhy/ws2122-lspm | 1 | 7113 | <reponame>Malekhy/ws2122-lspm
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from enum import Enum
from typing import Optional, Dict, Any, Tuple, List, Union
from intervaltree import Interval, IntervalTree
from pm4py.util import exec_utils
class Parameters(Enum):
EPSILON = "epsilon"
def apply(points: List[Tuple[float, float]], parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> List[int]:
"""
Computes the overlap statistic given a list of points, expressed as (min_timestamp, max_timestamp)
Parameters
-----------------
points
List of points with the aforementioned features
parameters
Parameters of the method, including:
- Parameters.EPSILON
Returns
-----------------
overlap
List associating to each point the number of intersecting points
"""
if parameters is None:
parameters = {}
epsilon = exec_utils.get_param_value(Parameters.EPSILON, parameters, 10 ** (-5))
points = [(x[0] - epsilon, x[1] + epsilon) for x in points]
sorted_points = sorted(points)
tree = IntervalTree()
for p in sorted_points:
tree.add(Interval(p[0], p[1]))
overlap = []
for p in points:
overlap.append(len(tree[p[0]:p[1]]))
return overlap
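
# Minimal usage sketch (added for illustration; not part of the original module).
# Each point is counted once for itself plus once per intersecting interval.
if __name__ == "__main__":
    example_points = [(0.0, 5.0), (3.0, 8.0), (10.0, 12.0)]
    print(apply(example_points))  # expected output: [2, 2, 1]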
| 2.421875 | 2 |
webapp/apps/Base Quiz/baseui_gen.py | sk-Prime/webapp | 4 | 7114 | <filename>webapp/apps/Base Quiz/baseui_gen.py
from htmlman import HTMLMan
from styleman import Template
page=HTMLMan()
page.make_responsive()
page.add_title("Base Quiz")
style=Template('antartica')
page.add_body_class(style['page'])
page.add_js("baseui.js")
page.create_section('main',append=True)
page['main'].add_style_class(style['main'])
title=page.create_section('title')
title.add_style_class(style['title'])
title.add_content("Base Quiz")
widget=page.create_section("widget")
widget.add_style_class(style['widget'])
label = page.create_section('label',ID='label')
#label.add_style_class(style['center'])
label.add_style(name='label',mode="class")
label.style_to_cssman(style)
label.style(
"font-size","20pt",
"font-family","monospace",
"height","50px",
"border-bottom","1px solid #ccd",
)
label.add_content("0x0")
answer_l=page.create_section("answer_l1",ID="label_t")
answer_l.add_style_class(style["label"])
answer_l2=page.create_section("answer_l2",ID="label_b")
answer_l2.add_style_class(style["label"])
controls = page.create_section("control")
controls.add_style(name="control",mode="class",cssman_obj=style)
controls.style(
"display","grid",
"grid-template-columns","1fr 1fr",
"gap","10px",
"padding","10px"
)
rand_b=page.create_section('random',tag="button",inner_html="Random")
rand_b.config_attr("type","button","onclick","randomize()")
answer_b=page.create_section('answer_b',tag="button",inner_html="Answer")
answer_b.config_attr("type","button","onclick","answer()")
controls.add_content(rand_b)
controls.add_content(answer_b)
widget.add_content(label)
widget.add_content(answer_l)
widget.add_content(answer_l2)
widget.add_content(controls)
page['main'].add_content(title)
page['main'].add_content(widget)
page.render(style,html_path="baseui.html") | 2.59375 | 3 |
cluster_config/cluster.py | srcc-msu/job_statistics | 0 | 7115 | <gh_stars>0
name = "cluster"
num_cores = 1000
GENERAL_PARTITIONS = ["regular"]
GPU_PARTITIONS = ["gpu"]
PARTITIONS = GENERAL_PARTITIONS + GPU_PARTITIONS
ACTIVE_JOB_STATES = ["RUNNING", "COMPLETING"]
FINISHED_JOB_STATES = ["COMPLETED", "NODE_FAIL", "TIMEOUT", "FAILED", "CANCELLED"]
JOB_STATES = ACTIVE_JOB_STATES + FINISHED_JOB_STATES
def node2int(node):
"""custom function to convert nodename to int
this one removes all chars from names like node1-001-01"""
return int(''.join(filter(lambda x: x.isdigit(), node)))
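
# Quick sanity check (illustrative addition, not part of the original config):
if __name__ == "__main__":
    # all non-digit characters are stripped, so "node1-001-01" collapses to 100101
    assert node2int("node1-001-01") == 100101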
| 2.671875 | 3 |
room_assistance/indico_room_assistance/plugin.py | OmeGak/indico-plugins-cern | 4 | 7116 | # This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2021 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
import dateutil.parser
import pytz
from flask import flash, request, session
from flask_pluginengine import render_plugin_template, url_for_plugin
from indico.core import signals
from indico.core.config import config
from indico.core.plugins import IndicoPlugin
from indico.core.settings.converters import ModelListConverter
from indico.modules.events.requests.models.requests import Request, RequestState
from indico.modules.events.requests.views import WPRequestsEventManagement
from indico.modules.rb.models.rooms import Room
from indico.modules.users import User
from indico.util.string import natural_sort_key
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import EmailListField, IndicoQuerySelectMultipleField, PrincipalListField
from indico.web.menu import TopMenuItem
from indico_room_assistance import _
from indico_room_assistance.blueprint import blueprint
from indico_room_assistance.definition import RoomAssistanceRequest
from indico_room_assistance.util import (can_request_assistance_for_event, event_has_room_with_support_attached,
is_room_assistance_support)
def _order_func(object_list):
return sorted(object_list, key=lambda r: natural_sort_key(r[1].full_name))
class RoomAssistanceForm(IndicoForm):
_fieldsets = [
('Startup assistance emails', ['room_assistance_recipients', 'rooms_with_assistance',
'room_assistance_support']),
]
room_assistance_recipients = EmailListField(_('Recipients'),
description=_('Notifications about room assistance requests are sent '
'to these email addresses (one per line)'))
rooms_with_assistance = IndicoQuerySelectMultipleField('Rooms',
query_factory=lambda: Room.query,
description=_('Rooms for which users can request startup '
'assistance'),
get_label='full_name', collection_class=set,
render_kw={'size': 20}, modify_object_list=_order_func)
room_assistance_support = PrincipalListField(_('Room assistance support'), allow_groups=True,
description=_('List of users who can view the list of events with '
'room startup assistance.'))
class RoomAssistancePlugin(IndicoPlugin):
"""Room assistance request
This plugin lets users request assistance for meeting rooms.
"""
configurable = True
settings_form = RoomAssistanceForm
settings_converters = {
'rooms_with_assistance': ModelListConverter(Room)
}
acl_settings = {'room_assistance_support'}
default_settings = {
'room_assistance_recipients': [],
'rooms_with_assistance': [],
}
def init(self):
super().init()
self.inject_bundle('main.css', WPRequestsEventManagement, subclasses=False,
condition=lambda: request.view_args.get('type') == RoomAssistanceRequest.name)
self.template_hook('event-actions', self._room_assistance_action)
self.connect(signals.menu.items, self._extend_services_menu, sender='top-menu')
self.connect(signals.plugin.get_event_request_definitions, self._get_room_assistance_request)
self.connect(signals.event.updated, self._on_event_update)
def get_blueprints(self):
return blueprint
def _room_assistance_action(self, event, **kwargs):
return render_plugin_template('room_assistance_action.html', event=event,
can_request_assistance=can_request_assistance_for_event(event))
def _extend_services_menu(self, reservation, **kwargs):
if not session.user or not is_room_assistance_support(session.user):
return
return TopMenuItem('services-cern-room-assistance', _('Room assistance'),
url_for_plugin('room_assistance.request_list'), section='services')
def _get_room_assistance_request(self, sender, **kwargs):
return RoomAssistanceRequest
def _on_event_update(self, event, **kwargs):
changes = kwargs['changes']
if not changes.keys() & {'location_data', 'start_dt', 'end_dt'}:
return
request = Request.find_latest_for_event(event, RoomAssistanceRequest.name)
if not request or request.state != RequestState.accepted:
return
if 'location_data' in changes and not event_has_room_with_support_attached(event):
request.definition.reject(request, {'comment': render_plugin_template('auto_reject_no_supported_room.txt')},
User.get_system_user())
request.data = dict(request.data, occurrences=[])
flash(_("The new event location is not in the list of the rooms supported by the room assistance team. "
"Room assistance request has been rejected and support will not be provided."), 'warning')
if changes.keys() & {'start_dt', 'end_dt'}:
tz = pytz.timezone(config.DEFAULT_TIMEZONE)
occurrences = {dateutil.parser.parse(occ).astimezone(tz) for occ in request.data['occurrences']}
req_dates = {occ.date() for occ in occurrences}
event_dates = set(event.iter_days())
old_dates = req_dates - event_dates
has_overlapping_dates = req_dates & event_dates
if not has_overlapping_dates:
request.definition.reject(request,
{'comment': render_plugin_template('auto_reject_no_overlapping_dates.txt')},
User.get_system_user())
request.data = dict(request.data, occurrences=[])
flash(_("The new event dates don't overlap with the existing room assistance request for this event. "
"Room assistance request has been rejected and support will not be provided."), 'warning')
elif old_dates and has_overlapping_dates:
new_data = dict(request.data)
new_data['occurrences'] = [occ.astimezone(pytz.utc).isoformat() for occ in occurrences
if occ.date() in req_dates & event_dates]
request.data = new_data
flash(_("Room assistance had been requested for days that are not between the updated start/end "
"dates. Support will not be provided on these days anymore."), 'warning')
| 1.71875 | 2 |
datamart/materializers/wikidata_spo_materializer.py | liangmuxin/datamart | 7 | 7117 | <gh_stars>1-10
from datamart.materializers.materializer_base import MaterializerBase
import os
import urllib.request
import sys
import csv
import copy
import json
from typing import List
from pprint import pprint
import re
import typing
from pandas import DataFrame
import traceback
class WikidataSPOMaterializer(MaterializerBase):
property = ""
def __init__(self, **kwargs):
""" initialization and loading the city name to city id map
"""
MaterializerBase.__init__(self, **kwargs)
def get(self,
metadata: dict = None,
constrains: dict = None
) -> typing.Optional[DataFrame]:
        materialization_arguments = metadata["materialization"].get("arguments", {})
        self.property = materialization_arguments.get("property", "")
prefix = 'http://sitaware.isi.edu:8080/bigdata/namespace/wdq/sparql?query='
format = '&format=json'
result = dict()
property_label = ""
main_query_encoded = self._encode_url(self._formulate_main_query(self.property))
try:
# print(prefix + main_query_encoded + format)
main_query_req = urllib.request.Request(prefix + main_query_encoded + format)
result, property_label = self._process_main_query(self._get_query_result(main_query_req))
except Exception as err:
print(err)
traceback.print_tb(err.__traceback__)
count = 0
while(True):
try:
main_query_encoded = self._encode_url(self._next(self._formulate_main_query(self.property), offset=count))
main_query_req = urllib.request.Request(prefix + main_query_encoded + format)
temp, property_label = self._process_main_query(self._get_query_result(main_query_req))
# property_label = re.sub(r"\s+", '_', property_label)
count += 1
result.update(temp)
except:
# print("property ", property, "count ", count)
break
property_label = re.sub(r"\s+", '_', property_label)
sep = ";"
values = list(result.values())
columns = ["source", "subject_label", "category", "prop_value", "value_label"]
# for val in values:
# col_name = col_name.union(set(val.keys()))
# columns = list(col_name)
rows = list()
for k, v in result.items():
v['value_label'] = list(filter(None, v['value_label']))
v['value_label'] = list() if not any(v['value_label']) else list(v['value_label'])
for k1, v1 in v.items():
if k1 != "source":
# print(k1, v1)
v[k1] = sep.join(v1)
rows.append(v)
df = DataFrame(rows, columns=columns)
# print(df)
return df
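    # Usage sketch (illustrative; P1082 is Wikidata's "population" property):
    #   WikidataSPOMaterializer().get(metadata={
    #       "materialization": {"arguments": {"property": "P1082"}}})
    # returns a DataFrame with columns source, subject_label, category,
    # prop_value and value_label.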
@staticmethod
def _formulate_main_query(property):
main_query = 'select distinct ?source ?source_l ?category ?prop_l ?prop_value ?know_as where{\
?source wdt:' + property + ' ?prop_value.\
?source rdfs:label ?source_l.\
?source wdt:P31/rdfs:label ?category.\
filter (lang(?category)="en")\
filter (lang(?source_l)="en")\
wd:' + property + ' rdfs:label ?prop_l.\
filter (lang(?prop_l)="en")\
optional {?prop_value rdfs:label ?know_as.\
filter (lang(?know_as)="en")}\
}'
return main_query
@staticmethod
def _formulate_id_category_query(property):
id_category_query = \
'select distinct ?identifier ?l where{\
?source wdt:' + property + ' ?value.\
?source ?id ?idValue.\
?identifier ?ref ?id.\
optional {?value rdfs:label ?know_as.\
filter (lang(?know_as)="en")}\
?identifier wikibase:directClaim ?id.\
?identifier wikibase:propertyType wikibase:ExternalId.\
?identifier rdfs:label ?l.\
?identifier schema:description ?desc.\
filter (lang(?desc)="en")\
filter (lang(?l)="en")\
}\
ORDER BY ?identifier'
return id_category_query
@staticmethod
def _next(query_sent, offset):
query_sent = query_sent + " LIMIT 1000 " + "OFFSET " + str(1000 * offset)
return query_sent
@staticmethod
def _encode_url(url):
encoded_url = urllib.parse.quote(url)
return encoded_url
@staticmethod
def _get_query_result(query_req) -> List[dict]:
data = {}
with urllib.request.urlopen(query_req) as r:
data = json.loads(r.read().decode('utf-8'))
result = data['results']['bindings']
return result
@staticmethod
def _process_id_category_query(data):
ids = dict()
for item in data:
identifier = item['l']['value']
ids[identifier] = set()
return ids
@staticmethod
def _process_main_query(data):
result = {}
property_label = ""
for item in data:
category = item['category']['value'].strip()
property_label = item['prop_l']['value'].strip()
source = item['source']['value'].strip()
prop_value = item['prop_value']['value'].strip()
know_as = item['know_as']['value'].strip() if 'know_as' in item.keys() else None
subject_l = item['source_l']['value'].strip()
# id = item['id']['value'].strip()
# id_l = item['id_l']['value'].strip()
# id_value = item['id_value']['value'].strip()
if source not in result.keys():
result[source] = dict()
result[source]['source'] = source
result[source]['category'] = set()
result[source]['prop_value'] = set()
result[source]['subject_label'] = set()
result[source]['value_label'] = set()
# result[source].update(copy.deepcopy(ids))
result[source]['prop_value'].add(prop_value)
result[source]['category'].add(category)
result[source]['subject_label'].add(subject_l)
result[source]['value_label'].add(know_as)
# result[source][id_l].add(id_value)
# pprint("ss", result)
return result, property_label
| 2.65625 | 3 |
axelrod/load_data_.py | danilobellini/Axelrod | 0 | 7118 | <filename>axelrod/load_data_.py
from typing import Dict, List, Tuple
import pkg_resources
def load_file(filename: str, directory: str) -> List[List[str]]:
"""Loads a data file stored in the Axelrod library's data subdirectory,
likely for parameters for a strategy."""
path = "/".join((directory, filename))
data_bytes = pkg_resources.resource_string(__name__, path)
data = data_bytes.decode("UTF-8", "replace")
rows = []
for line in data.split("\n"):
if line.startswith("#") or len(line) == 0:
continue
s = line.split(", ")
rows.append(s)
return rows
def load_weights(
filename: str = "ann_weights.csv", directory: str = "data"
) -> Dict[str, Tuple[int, int, List[float]]]:
"""Load Neural Network Weights."""
rows = load_file(filename, directory)
d = dict()
for row in rows:
name = str(row[0])
num_features = int(row[1])
num_hidden = int(row[2])
weights = list(map(float, row[3:]))
d[name] = (num_features, num_hidden, weights)
return d
def load_pso_tables(filename="pso_gambler.csv", directory="data"):
"""Load lookup tables."""
rows = load_file(filename, directory)
d = dict()
for row in rows:
        name, a, b, c = str(row[0]), int(row[1]), int(row[2]), int(row[3])
        values = list(map(float, row[4:]))
        d[(name, a, b, c)] = values
return d
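
# Usage sketch (added for illustration; requires the CSV files shipped in the
# package's data directory, and the exact dictionary keys depend on those files):
if __name__ == "__main__":
    weights = load_weights()    # name -> (num_features, num_hidden, [weights ...])
    tables = load_pso_tables()  # (name, a, b, c) -> [parameter values]
    print(len(weights), "ANN weight sets,", len(tables), "PSO tables loaded")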
| 3.09375 | 3 |
prescryptchain/api/views.py | genobank-io/CryptoVault | 3 | 7119 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# REST
from rest_framework.viewsets import ViewSetMixin
from rest_framework import routers, serializers, viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
from rest_framework.permissions import IsAuthenticated, BasePermission
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.views import APIView
from rest_framework import mixins, generics
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
# our models
from blockchain.models import Block, Prescription, Transaction, Address
from blockchain.utils import pubkey_string_to_rsa, savify_key, pubkey_base64_to_rsa, pubkey_base64_from_uri
from .exceptions import NonValidPubKey
# Define router
router = routers.DefaultRouter()
class PrescriptionSerializer(serializers.ModelSerializer):
""" Prescription serializer """
timestamp = serializers.DateTimeField(read_only=False)
data = serializers.JSONField(binary=False, read_only=False, required=False)
files = serializers.JSONField(binary=False, read_only=False, required=False)
previous_hash = serializers.CharField(read_only=False, required=False, default="0")
class Meta:
model = Prescription
fields = (
'id',
'public_key',
'data',
"files",
'timestamp',
'signature',
'previous_hash',
'raw_size',
'hash_id',
'is_valid',
'transaction',
'readable',
)
        read_only_fields = ('id', 'hash_id', 'is_valid', 'transaction',)
def validate(self, data):
''' Method to control Extra Keys on Payload!'''
extra_keys = set(self.initial_data.keys()) - set(self.fields.keys())
if extra_keys:
print(extra_keys)
return data
def create(self, validated_data):
return Transaction.objects.create_tx(data=validated_data)
class PrescriptionViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
# Temporally without auth
# authentication_classes = (TokenAuthentication, BasicAuthentication, )
# permission_classes = (IsAuthenticated, )
serializer_class = PrescriptionSerializer
lookup_field = "hash_id"
http_method_names = ['get', 'post', 'options']
def get_queryset(self):
''' Custom Get queryset '''
raw_public_key = self.request.query_params.get('public_key', None)
if raw_public_key:
try:
pub_key = pubkey_string_to_rsa(raw_public_key)
except:
pub_key , raw_public_key = pubkey_base64_to_rsa(raw_public_key)
hex_raw_pub_key = savify_key(pub_key)
return Prescription.objects.filter(public_key=hex_raw_pub_key).order_by('-id')
else:
return Prescription.objects.all().order_by('-id')
# add patient filter by email, after could modify with other
router.register(r'rx-endpoint', PrescriptionViewSet, 'prescription-endpoint')
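# Illustrative request (the /api/ prefix is an assumption about the project's
# root urlconf):
#   GET /api/rx-endpoint/?public_key=<hex or base64 public key>
# returns only the prescriptions stored under that public key.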
class BlockSerializer(serializers.ModelSerializer):
""" Prescription serializer """
class Meta:
model = Block
fields = (
'id',
'hash_block',
'previous_hash',
'raw_size',
'data',
'timestamp',
'merkleroot',
'hashcash',
'nonce',
)
read_only_fields = ('id', 'hash_block','timestamp','previous_hash', 'raw_size', 'data', 'merkleroot','hashcash','nonce',)
class BlockViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
serializer_class = BlockSerializer
def get_queryset(self):
return Block.objects.all().order_by('-timestamp')
# add patient filter by email, after could modify with other
router.register(r'block', BlockViewSet, 'block-endpoint')
class AddressSerializer(serializers.ModelSerializer):
""" Address serializer """
pub_key = serializers.CharField(read_only=True,allow_null=True, source="get_pub_key" )
class Meta:
model = Address
fields = (
'public_key_b64',
'address',
'is_valid',
'pub_key',
)
read_only_fields = ('address','pub_key', )
class AddressViewSet(viewsets.ModelViewSet):
""" Prescription Viewset """
serializer_class = AddressSerializer
lookup_field = "address"
http_method_names = ['get', 'options']
def get_queryset(self):
''' Custom Get queryset '''
raw_public_key = self.request.query_params.get('public_key', None)
if raw_public_key:
try:
pub_key_b64 = pubkey_base64_from_uri(raw_public_key)
except Exception as e:
raise NonValidPubKey
else:
_address = Address.objects.get_or_create_rsa_address(pub_key_b64)
return Address.objects.filter(address=_address)
else:
return Address.objects.all()
# add patient filter by email, after could modify with other
router.register(r'address', AddressViewSet, 'address_endpoint')
| 1.992188 | 2 |
client/commands/incremental.py | stvreumi/pyre-check | 0 | 7120 | <reponame>stvreumi/pyre-check
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import atexit
import logging
import os
import subprocess
import sys
from typing import List
from .command import ClientException, ExitCode, State
from .reporting import Reporting
from .start import Start
LOG = logging.getLogger(__name__)
class Incremental(Reporting):
NAME = "incremental"
def __init__(self, arguments, configuration, analysis_directory) -> None:
super(Incremental, self).__init__(arguments, configuration, analysis_directory)
def _run(self) -> None:
if self._state() == State.DEAD:
LOG.warning("Starting server at `%s`.", self._analysis_directory.get_root())
arguments = self._arguments
arguments.terminal = False
arguments.no_watchman = False
Start(arguments, self._configuration, self._analysis_directory).run()
if self._state() != State.DEAD:
LOG.info("Waiting for server...")
result = self._call_client(command=self.NAME)
try:
result.check()
errors = self._get_errors(result)
self._print(errors)
except ClientException as exception:
LOG.error("Error while waiting for server.")
LOG.error("Run `%s restart` in order to restart the server.", sys.argv[0])
self._exit_code = ExitCode.FAILURE
def _flags(self) -> List[str]:
flags = super()._flags()
flags.extend(
[
"-typeshed",
self._configuration.typeshed,
"-expected-binary-version",
self._configuration.version_hash,
]
)
search_path = self._configuration.search_path
if search_path:
flags.extend(["-search-path", ",".join(search_path)])
return flags
# pyre-ignore: T31696900
def _read_stderr(self, _stream, analysis_directory) -> None:
stderr_file = os.path.join(analysis_directory, ".pyre/server/server.stdout")
with subprocess.Popen(
["tail", "-f", stderr_file],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
) as stderr_tail:
atexit.register(stderr_tail.terminate)
super(Incremental, self)._read_stderr(
stderr_tail.stdout, analysis_directory
)
| 2.171875 | 2 |
main_random_policy.py | rish-raghu/Object-Goal-Navigation | 0 | 7121 | from collections import deque, defaultdict
import os
import sys
import logging
import time
import json
import gym
import torch.nn as nn
import torch
import numpy as np
import matplotlib.pyplot as plt
from model import RL_Policy, Semantic_Mapping
from utils.storage import GlobalRolloutStorage
from envs import make_vec_envs
from arguments import get_args
import algo
os.environ["OMP_NUM_THREADS"] = "1"
def main():
args = get_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Setup Logging
log_dir = "{}/models/{}/".format(args.dump_location, args.exp_name)
dump_dir = "{}/dump/{}/".format(args.dump_location, args.exp_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
logging.basicConfig(
filename=log_dir + 'train.log',
level=logging.INFO)
print("Dumping at {}".format(log_dir))
print(args)
logging.info(args)
# Logging and loss variables
num_scenes = args.num_processes
num_episodes = int(args.num_eval_episodes)
device = args.device = torch.device("cuda:0" if args.cuda else "cpu")
g_masks = torch.ones(num_scenes).float().to(device)
best_g_reward = -np.inf
# one episode per process for both train and eval
# for eval, one scene per process
if args.eval:
episode_success = []
episode_spl = []
episode_dist = []
for _ in range(args.num_processes):
episode_success.append(deque(maxlen=num_episodes))
episode_spl.append(deque(maxlen=num_episodes))
episode_dist.append(deque(maxlen=num_episodes))
# for train, different episodes of same scene per process
else:
episode_success = deque(maxlen=1000)
episode_spl = deque(maxlen=1000)
episode_dist = deque(maxlen=1000)
finished = np.zeros((args.num_processes))
wait_env = np.zeros((args.num_processes))
g_episode_rewards = deque(maxlen=1000)
g_value_losses = deque(maxlen=1000)
g_action_losses = deque(maxlen=1000)
g_dist_entropies = deque(maxlen=1000)
per_step_g_rewards = deque(maxlen=1000)
g_process_rewards = np.zeros((num_scenes))
# Starting environments
torch.set_num_threads(1)
envs = make_vec_envs(args)
obs, infos = envs.reset()
full_episode_data = []
episode_data = [None] * num_scenes
for e, info in enumerate(infos):
cInfo = info.copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
torch.set_grad_enabled(False)
# Initialize map variables:
# Full map consists of multiple channels containing the following:
# 1. Obstacle Map
# 2. Exploread Area (places that are known to be free or occupied)
# 3. Current Agent Location
# 4. Past Agent Locations
# 5,6,7,.. : Semantic Categories
nc = args.num_sem_categories + 4 # num channels
# Calculating full and local map sizes
map_size = args.map_size_cm // args.map_resolution
full_w, full_h = map_size, map_size
local_w = int(full_w / args.global_downscaling)
local_h = int(full_h / args.global_downscaling)
# Initializing full and local map
full_map = torch.zeros(num_scenes, nc, full_w, full_h).float().to(device)
local_map = torch.zeros(num_scenes, nc, local_w,
local_h).float().to(device)
# Initial full and local pose
full_pose = torch.zeros(num_scenes, 3).float().to(device)
local_pose = torch.zeros(num_scenes, 3).float().to(device)
# Origin of local map
origins = np.zeros((num_scenes, 3))
# Local Map Boundaries
lmb = np.zeros((num_scenes, 4)).astype(int)
# Planner pose inputs has 7 dimensions
# 1-3 store continuous global agent location
# 4-7 store local map boundaries
planner_pose_inputs = np.zeros((num_scenes, 7))
# get local boundary (x1, x2, y1, y2) given local agent position (x, y) and map size
def get_local_map_boundaries(agent_loc, local_sizes, full_sizes):
loc_r, loc_c = agent_loc
local_w, local_h = local_sizes
full_w, full_h = full_sizes
if args.global_downscaling > 1:
gx1, gy1 = loc_r - local_w // 2, loc_c - local_h // 2
gx2, gy2 = gx1 + local_w, gy1 + local_h
if gx1 < 0:
gx1, gx2 = 0, local_w
if gx2 > full_w:
gx1, gx2 = full_w - local_w, full_w
if gy1 < 0:
gy1, gy2 = 0, local_h
if gy2 > full_h:
gy1, gy2 = full_h - local_h, full_h
else:
gx1, gx2, gy1, gy2 = 0, full_w, 0, full_h
return [gx1, gx2, gy1, gy2]
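        # Worked example (hypothetical sizes): with a 960 x 960 full map, a
        # 240 x 240 local map and the agent at cell (100, 500), the row window
        # (-20, 220) is clamped to (0, 240) while the column window stays at
        # (380, 620), so the function returns [0, 240, 380, 620].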
# initialize global and local maps and poses given that initial position
# is at map center with 0 orientation
def init_map_and_pose():
full_map.fill_(0.)
full_pose.fill_(0.)
full_pose[:, :2] = args.map_size_cm / 100.0 / 2.0
locs = full_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
# 3x3 grid around agent location is considered explored
full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
for e in range(num_scenes):
local_map[e] = full_map[e, :,
lmb[e, 0]:lmb[e, 1],
lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
# identical to above, except for specific environment
def init_map_and_pose_for_env(e):
full_map[e].fill_(0.)
full_pose[e].fill_(0.)
full_pose[e, :2] = args.map_size_cm / 100.0 / 2.0
locs = full_pose[e].cpu().numpy()
planner_pose_inputs[e, :3] = locs
r, c = locs[1], locs[0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
local_map[e] = full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
# reward is the newly explored area in a given step (in m^2)
def update_intrinsic_rew(e):
prev_explored_area = full_map[e, 1].sum(1).sum(0)
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
local_map[e]
curr_explored_area = full_map[e, 1].sum(1).sum(0)
intrinsic_rews[e] = curr_explored_area - prev_explored_area
intrinsic_rews[e] *= (args.map_resolution / 100.)**2 # to m^2
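        # e.g. with a 5 cm map_resolution (an assumed value), each newly explored
        # cell adds 0.05 ** 2 = 0.0025 m^2, so 400 new cells give a reward of 1.0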
def get_random_goal(e):
for _ in range(20):
goal = np.random.rand(2)
            goal = [int(goal[0] * local_w), int(goal[1] * local_h)]
            goal = [min(goal[0], int(local_w-1)), min(goal[1], int(local_h-1))]
if not local_map[e, 1, goal[0], goal[1]]: break
return goal
init_map_and_pose()
# Global policy observation space
ngc = 8 + args.num_sem_categories
es = 2
g_observation_space = gym.spaces.Box(0, 1, # binary local map
(ngc,
local_w,
local_h), dtype='uint8')
# Semantic Mapping
sem_map_module = Semantic_Mapping(args).to(device)
sem_map_module.eval()
intrinsic_rews = torch.zeros(num_scenes).to(device)
# Predict semantic map from frame 1
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx in range(num_scenes)])
).float().to(device)
# args (obs, pose_obs, maps_last, poses_last)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
global_goals = [get_random_goal(e) for e in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
episode_data[e]["used_policy"].append(True)
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy() # obstacles
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy() # explored
p_input['pose_pred'] = planner_pose_inputs[e] # global location+local map bounds
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = 1
p_input['found_goal'] = 0
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
local_map[e, -1, :, :] = 1e-5 # TODO: what is this?
# single channel where each grid loc is cat ID
p_input['sem_map_pred'] = local_map[e, 4:, :, :
].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
start = time.time()
g_reward = 0
torch.set_grad_enabled(False)
spl_per_category = defaultdict(list)
success_per_category = defaultdict(list)
for step in range(args.num_training_frames // args.num_processes + 1):
if finished.sum() == args.num_processes:
break
g_step = (step // args.num_local_steps) % args.num_global_steps # global step num in PPO
l_step = step % args.num_local_steps # local step num in global step
# ------------------------------------------------------------------
# Reinitialize variables when episode ends
l_masks = torch.FloatTensor([0 if x else 1
for x in done]).to(device)
g_masks *= l_masks
for e, x in enumerate(done):
if x:
spl = infos[e]['spl']
success = infos[e]['success']
dist = infos[e]['distance_to_goal']
spl_per_category[infos[e]['goal_name']].append(spl)
success_per_category[infos[e]['goal_name']].append(success)
if args.eval:
episode_success[e].append(success)
episode_spl[e].append(spl)
episode_dist[e].append(dist)
if len(episode_success[e]) == num_episodes:
finished[e] = 1
episode_data[e]["success"] = success
episode_data[e]["spl"] = spl
episode_data[e]["distance_to_goal"] = dist
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = local_map[e]
episode_data[e]["explored_area"] = full_map[e, 1].sum(1).sum(0).item()
scene = episode_data[e]["scene_id"][16:-4]
if args.save_maps:
np.save('{}/maparr_{}_{}'.format(dump_dir, scene, episode_data[e]['episode_id']), full_map[e].cpu().numpy())
full_episode_data.append(episode_data[e])
cInfo = infos[e].copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
else:
episode_success.append(success)
episode_spl.append(spl)
episode_dist.append(dist)
wait_env[e] = 1.
update_intrinsic_rew(e)
init_map_and_pose_for_env(e)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Semantic Mapping Module
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx
in range(num_scenes)])
).float().to(device)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs + origins
local_map[:, 2, :, :].fill_(0.) # Resetting current location channel
# update current location
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 2:loc_r + 3, loc_c - 2:loc_c + 3] = 1.
if args.eval and not wait_env[e]:
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Global Policy
if l_step == args.num_local_steps - 1:
# For every global step, update the full and local maps
for e in range(num_scenes):
if wait_env[e] == 1: # New episode
wait_env[e] = 0.
else:
update_intrinsic_rew(e)
# update global map and pose based on new position in old local frame
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
local_map[e]
full_pose[e] = local_pose[e] + \
torch.from_numpy(origins[e]).to(device).float()
# center the local frame based on new position
locs = full_pose[e].cpu().numpy()
r, c = locs[1], locs[0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
lmb[e] = get_local_map_boundaries((loc_r, loc_c),
(local_w, local_h),
(full_w, full_h))
# compute new local map and pose based on new local frame
planner_pose_inputs[e, 3:] = lmb[e]
origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
lmb[e][0] * args.map_resolution / 100.0, 0.]
local_map[e] = full_map[e, :,
lmb[e, 0]:lmb[e, 1],
lmb[e, 2]:lmb[e, 3]]
local_pose[e] = full_pose[e] - \
torch.from_numpy(origins[e]).to(device).float()
locs = local_pose.cpu().numpy()
# Get exploration reward and metrics
g_reward = torch.from_numpy(np.asarray(
[infos[env_idx]['g_reward'] for env_idx in range(num_scenes)])
).float().to(device)
g_reward += args.intrinsic_rew_coeff * intrinsic_rews.detach()
for e in range(num_scenes):
if args.eval and not wait_env[e]:
episode_data[e]["goal_rewards"].append(infos[e]["g_reward"])
episode_data[e]["explore_rewards"].append(intrinsic_rews[e].item())
g_process_rewards += g_reward.cpu().numpy()
g_total_rewards = g_process_rewards * \
(1 - g_masks.cpu().numpy())
g_process_rewards *= g_masks.cpu().numpy()
per_step_g_rewards.append(np.mean(g_reward.cpu().numpy()))
if np.sum(g_total_rewards) != 0:
for total_rew in g_total_rewards:
if total_rew != 0:
g_episode_rewards.append(total_rew)
global_goals = [get_random_goal(e) for e in range(num_scenes)]
for e in range(num_scenes):
if args.eval and not wait_env[e]:
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
g_reward = 0
g_masks = torch.ones(num_scenes).float().to(device)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Update long-term goal if target object is found
found_goal = [0 for _ in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
# If goal category not found in map, goal is the location sampled by
# policy
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
if args.eval and not wait_env[e]:
episode_data[e]["used_policy"].append(True)
# Else if goal category found in map, use all locations where prob of goal
# obj existing is > 0 as the goal map for planner
for e in range(num_scenes):
cn = infos[e]['goal_cat_id'] + 4
if local_map[e, cn, :, :].sum() != 0.:
cat_semantic_map = local_map[e, cn, :, :].cpu().numpy()
cat_semantic_scores = cat_semantic_map
cat_semantic_scores[cat_semantic_scores > 0] = 1.
goal_maps[e] = cat_semantic_scores
found_goal[e] = 1
if args.eval and not wait_env[e]:
episode_data[e]["used_policy"][-1] = False
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Take action and get next observation
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy()
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy()
p_input['pose_pred'] = planner_pose_inputs[e]
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = l_step == args.num_local_steps - 1
p_input['found_goal'] = found_goal[e]
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
local_map[e, -1, :, :] = 1e-5
p_input['sem_map_pred'] = local_map[e, 4:, :,
:].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
# ------------------------------------------------------------------
# Logging
if len(full_episode_data) % args.episode_save_interval == 0:
with open('{}/{}_episode_data.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(full_episode_data, f)
if step % args.log_interval == 0:
end = time.time()
time_elapsed = time.gmtime(end - start)
log = " ".join([
"Time: {0:0=2d}d".format(time_elapsed.tm_mday - 1),
"{},".format(time.strftime("%Hh %Mm %Ss", time_elapsed)),
"num timesteps {},".format(step * num_scenes),
"FPS {},".format(int(step * num_scenes / (end - start)))
])
log += "\n\tRewards:"
if len(g_episode_rewards) > 0:
log += " ".join([
" Global step mean/med rew:",
"{:.4f}/{:.4f},".format(
np.mean(per_step_g_rewards),
np.median(per_step_g_rewards)),
" Global eps mean/med/min/max eps rew:",
"{:.3f}/{:.3f}/{:.3f}/{:.3f},".format(
np.mean(g_episode_rewards),
np.median(g_episode_rewards),
np.min(g_episode_rewards),
np.max(g_episode_rewards))
])
if args.eval:
total_success = []
total_spl = []
total_dist = []
for e in range(args.num_processes):
for acc in episode_success[e]:
total_success.append(acc)
for dist in episode_dist[e]:
total_dist.append(dist)
for spl in episode_spl[e]:
total_spl.append(spl)
if len(total_spl) > 0:
log += " ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(total_success),
np.mean(total_spl),
np.mean(total_dist),
len(total_spl))
else:
if len(episode_success) > 100:
log += " ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(episode_success),
np.mean(episode_spl),
np.mean(episode_dist),
len(episode_spl))
log += "\n\tLosses:"
if len(g_value_losses) > 0 and not args.eval:
log += " ".join([
" Policy Loss value/action/dist:",
"{:.3f}/{:.3f}/{:.3f},".format(
np.mean(g_value_losses),
np.mean(g_action_losses),
np.mean(g_dist_entropies))
])
print(log)
logging.info(log)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Save best models
if (step * num_scenes) % args.save_interval < \
num_scenes:
if len(g_episode_rewards) >= 1000 and \
(np.mean(g_episode_rewards) >= best_g_reward) \
and not args.eval:
torch.save(g_policy.state_dict(),
os.path.join(log_dir, "model_best.pth"))
best_g_reward = np.mean(g_episode_rewards)
# Save periodic models
if (step * num_scenes) % args.save_periodic < \
num_scenes:
total_steps = step * num_scenes
if not args.eval:
torch.save(g_policy.state_dict(),
os.path.join(dump_dir,
"periodic_{}.pth".format(total_steps)))
# ------------------------------------------------------------------
# Print and save model performance numbers during evaluation
if args.eval:
print("Dumping eval details...")
total_success = []
total_spl = []
total_dist = []
for e in range(args.num_processes):
for acc in episode_success[e]:
total_success.append(acc)
for dist in episode_dist[e]:
total_dist.append(dist)
for spl in episode_spl[e]:
total_spl.append(spl)
if len(total_spl) > 0:
log = "Final ObjectNav succ/spl/dtg:"
log += " {:.3f}/{:.3f}/{:.3f}({:.0f}),".format(
np.mean(total_success),
np.mean(total_spl),
np.mean(total_dist),
len(total_spl))
print(log)
logging.info(log)
# Save the spl per category
log = "Success | SPL per category\n"
for key in success_per_category:
log += "{}: {} | {}\n".format(key,
sum(success_per_category[key]) /
len(success_per_category[key]),
sum(spl_per_category[key]) /
len(spl_per_category[key]))
print(log)
logging.info(log)
with open('{}/{}_spl_per_cat_pred_thr.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(spl_per_category, f)
with open('{}/{}_success_per_cat_pred_thr.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(success_per_category, f)
with open('{}/{}_episode_data.json'.format(
dump_dir, args.split), 'w') as f:
json.dump(full_episode_data, f)
if __name__ == "__main__":
main()
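# Illustrative sketch (not from the original script): a minimal, self-contained
# version of the goal-map selection used inside the training loop above -- fall
# back to the policy-sampled cell unless the goal category is already present in
# the local semantic map. All names below are illustrative assumptions rather
# than definitions from the original code base.
import numpy as np


def select_goal_map(cat_channel, policy_goal, local_w, local_h):
    """Return (goal_map, found_goal) for a single scene.

    cat_channel: 2D array, semantic-map channel of the goal category
    policy_goal: (row, col) cell sampled by the global policy
    """
    goal_map = np.zeros((local_w, local_h))
    if cat_channel.sum() != 0.:
        # Goal category observed: every cell with non-zero probability is a goal.
        goal_map = (cat_channel > 0).astype(np.float32)
        return goal_map, 1
    # Otherwise keep the single cell chosen by the policy.
    goal_map[policy_goal[0], policy_goal[1]] = 1
    return goal_map, 0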
| 2.03125 | 2 |
src/ITN/srmg/core/RiemannianRight.py | Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection | 1 | 7122 | <filename>src/ITN/srmg/core/RiemannianRight.py
#!/usr/bin/env python
# coding=utf-8
'''
Author: <NAME> / Yulv
Email: <EMAIL>
Date: 2022-03-19 10:33:38
Motto: Entities should not be multiplied unnecessarily.
LastEditors: <NAME>
LastEditTime: 2022-03-23 00:52:55
FilePath: /Awesome-Ultrasound-Standard-Plane-Detection/src/ITN/srmg/core/RiemannianRight.py
Description: Modify here please
Init from https://github.com/yuanwei1989/plane-detection Author: <NAME> (3 Oct 2018)
# Copyright (c) 2006-2017, <NAME>, <NAME>, <NAME>
# Copyright (c) 2006-2017, Imperial College of Science, Technology and Medicine
# Produced at Biomedical Image Analysis Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
Statistics on Riemannian Manifolds and Groups
---------------------------------------------
This is a set of codes to compare the computing of the different types of means on Lie groups.
These codes can be used to reproduce the experiments illustrated in the video developed for the
MICCAI Educational challenge 2014, available at: url of the video.
:Authors:
`<NAME> <website>`
`<NAME> <website>`
:Organization:
Asclepios Team, INRIA Sophia Antipolis.
:Version:
2017.07.05
Requirements
------------
* `Numpy 1.11 <http://www.numpy.org>`_
Notes
-----
----------
(1) Defining a mean on Lie group.
<NAME>. Medical Imaging. 2013. <hal-00938320>
'''
import numpy
import math
from srmg.common.group import *
from srmg.common.util import *
EPS = 1e-5
def riemExpR(a,f0,v):
"""
    Riemannian exponential from an arbitrary point f0, for the right-invariant
    metric, expressed in the principal chart.
    Attributes:
        a: metric parameter
        f0: base point in the principal chart (6-vector)
        v: tangent vector at f0 (6-vector)
    Return:
        f: the point exp_f0(v) in the principal chart
"""
f = grpCompose((riemExpIdR(a, numpy.linalg.lstsq(jR(f0),v)[0])), f0)
return f
def riemExpIdR(a,v):
"""
    Riemannian exponential from the identity, for the right-invariant metric,
    expressed in the principal chart.
    Attributes:
        a: metric parameter
        v: tangent vector at the identity (6-vector)
    Return:
        f: the point exp_Id(v) in the principal chart
"""
v=grpReg(-v);
f = numpy.zeros(6)
f[0:3] = v[0:3]
f[3:6] = a * v[3:6]
f = grpInv(f)
return f
def sigma2R(a,m,tabf,tabw):
"""
    Weighted variance of the data points tabf around the point m, measured with
    the right-invariant metric.
    Attributes:
        a: metric parameter
        m: reference point in the principal chart (6-vector)
        tabf: SE3 data points (Nx6 array)
        tabw: data point weights (Nx1 vector)
    Return:
        s: weighted sum of squared Riemannian distances from m
"""
siz = tabf.shape[0]
if siz < 2:
print('Error: Calculating variance requires at least 2 points')
return 0
s = 0
for i in range(0,siz):
s = s + tabw[i] * normA2R(a,m,riemLogR(a,m,tabf[i,:]));
return s
def riemLogR(a,f0,f):
"""
    Riemannian logarithm at an arbitrary point f0, for the right-invariant metric.
    Attributes:
        a: metric parameter
        f0: base point in the principal chart (6-vector)
        f: target point in the principal chart (6-vector)
    Return:
        v: tangent vector at f0 such that exp_f0(v) = f
"""
v=numpy.dot(jR(f0),riemLogIdR(a,grpCompose(f,grpInv(f0))))
return v
def riemLogIdR(a,f):
"""
    Riemannian logarithm at the identity, for the right-invariant metric.
    Attributes:
        a: metric parameter
        f: point in the principal chart (6-vector)
    Return:
        v: tangent vector at the identity such that exp_Id(v) = f
"""
v = numpy.zeros(6)
v[0:3] = f[0:3]
v[3:6] = numpy.dot(rotMat(-f[0:3]),f[3:6]);
return v
def qR(a,f):
"""
    Right-invariant inner product in the principal chart (propagation of the
    Frobenius inner product by the right translation).
    Attributes:
        a: metric parameter
        f: point in the principal chart (6-vector)
    Return:
        g: 6x6 matrix of the metric at f
"""
f = grpReg(f)
g0 = numpy.zeros([6,6])
g0[0:3,0:3] = numpy.eye(3)
g0[3:6,3:6] = a * numpy.eye(3)
g = numpy.dot(numpy.dot(numpy.linalg.inv(jR(f).T) , g0) , numpy.linalg.inv(jR(f)))
return g
def jR(f):
"""
    Differential of the right translation for SE(3) in the principal chart
    Attributes:
        f: element of SE(3) in the principal chart (6-vector)
    Return:
        Jr: 6x6 Jacobian of the right translation by f
"""
#f = makeColVector(f,6); # unnecessary if 1D
f = grpReg(f);
Jr = numpy.zeros([6,6])
Jr[0:3,0:3] = jRotR(f[0:3]);
Jr[3:6,0:3] = -skew(f[3:6]);
Jr[3:6,3:6] = numpy.eye(3);
return Jr
def normA2R(a,f,v):
"""
    This function calculates the squared norm of a tangent vector v at f under
    the right-invariant metric.
    Attributes:
        a: metric parameter
        f: base point in the principal chart (6-vector)
        v: tangent vector at f (6-vector)
    Return:
        n: the squared norm v^T qR(a, f) v
"""
v=grpReg(v);
n=numpy.dot(numpy.dot(v.T,qR(a,f)),v);
return n
def frechetR(a,tabf,tabw):
"""
    This function computes the Frechet mean for the right-invariant metric
    Attributes:
        a: metric parameter
        tabf: SE3 data points (Nx6 array)
        tabw: data point weights (Nx1 vector)
    Return:
        m: The mean (6-vector in the principal chart)
"""
siz = tabf.shape[0]
if siz < 2:
print('Error: Calculating mean requires at least 2 points')
m = tabf[0,:]
# Iteration 0
mbis=m;
print('mbisR=' + str(mbis))
aux=numpy.zeros(6);
for i in range (0,siz):
aux=aux+tabw[i]*riemLogR(a,mbis,tabf[i,:]);
m=riemExpR(a,mbis,aux);
# Iteration 1 until converges
while (normA2R(a,mbis,riemLogR(a,mbis,m))>EPS*sigma2R(a,mbis,tabf,tabw)):
mbis=m;
print('mbisR=' + str(mbis))
aux=numpy.zeros(6);
for i in range (0,siz):
aux=aux+tabw[i]*riemLogR(a,mbis,tabf[i,:]);
m=riemExpR(a,mbis,aux);
return m
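# Illustrative usage sketch (not part of the original module): a toy call to
# frechetR with three made-up SE(3) elements in the principal chart and uniform
# weights. It assumes the helpers imported from srmg.common behave as their
# names suggest.
if __name__ == '__main__':
    a_demo = 1.0                                   # metric parameter (assumed)
    tabf_demo = numpy.array([[0.10, 0.00, 0.00, 1.0, 0.0, 0.0],
                             [0.00, 0.10, 0.00, 0.9, 0.1, 0.0],
                             [0.00, 0.00, 0.10, 1.1, 0.0, 0.1]])
    tabw_demo = numpy.ones(3) / 3.0                # uniform weights
    print('Right-invariant Frechet mean:', frechetR(a_demo, tabf_demo, tabw_demo))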
| 1.195313 | 1 |
v0449gRpc_pb2.py | StormDev87/VPH_bot_python | 1 | 7123 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v0449gRpc.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0fv0449gRpc.proto\x12\tv0449gRpc\"\x1b\n\x0b\x64\x61taRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\x08\x64\x61ta2Plc\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1f\n\x0cslaveReq2Plc\x12\x0f\n\x07request\x18\x01 \x01(\x05\"\x1a\n\x08\x64\x61ta2Hmi\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1b\n\ndata2PlcJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1b\n\ndata2HmiJs\x12\r\n\x05jsSer\x18\x01 \x01(\t\"\x1c\n\ndata2PlcPb\x12\x0e\n\x06xmlSer\x18\x01 \x01(\t\"\x1d\n\ndataAnswer\x12\x0f\n\x07message\x18\x01 \x01(\t2\x93\x01\n\x0cv0449gRpcSvc\x12=\n\x0bxchRtDataJs\x12\x15.v0449gRpc.data2PlcJs\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x12\x44\n\x10xchRtDataJsSlave\x12\x17.v0449gRpc.slaveReq2Plc\x1a\x15.v0449gRpc.data2HmiJs\"\x00\x62\x06proto3')
_DATAREQUEST = DESCRIPTOR.message_types_by_name['dataRequest']
_DATA2PLC = DESCRIPTOR.message_types_by_name['data2Plc']
_SLAVEREQ2PLC = DESCRIPTOR.message_types_by_name['slaveReq2Plc']
_DATA2HMI = DESCRIPTOR.message_types_by_name['data2Hmi']
_DATA2PLCJS = DESCRIPTOR.message_types_by_name['data2PlcJs']
_DATA2HMIJS = DESCRIPTOR.message_types_by_name['data2HmiJs']
_DATA2PLCPB = DESCRIPTOR.message_types_by_name['data2PlcPb']
_DATAANSWER = DESCRIPTOR.message_types_by_name['dataAnswer']
dataRequest = _reflection.GeneratedProtocolMessageType('dataRequest', (_message.Message,), {
'DESCRIPTOR' : _DATAREQUEST,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.dataRequest)
})
_sym_db.RegisterMessage(dataRequest)
data2Plc = _reflection.GeneratedProtocolMessageType('data2Plc', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLC,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2Plc)
})
_sym_db.RegisterMessage(data2Plc)
slaveReq2Plc = _reflection.GeneratedProtocolMessageType('slaveReq2Plc', (_message.Message,), {
'DESCRIPTOR' : _SLAVEREQ2PLC,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.slaveReq2Plc)
})
_sym_db.RegisterMessage(slaveReq2Plc)
data2Hmi = _reflection.GeneratedProtocolMessageType('data2Hmi', (_message.Message,), {
'DESCRIPTOR' : _DATA2HMI,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2Hmi)
})
_sym_db.RegisterMessage(data2Hmi)
data2PlcJs = _reflection.GeneratedProtocolMessageType('data2PlcJs', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLCJS,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcJs)
})
_sym_db.RegisterMessage(data2PlcJs)
data2HmiJs = _reflection.GeneratedProtocolMessageType('data2HmiJs', (_message.Message,), {
'DESCRIPTOR' : _DATA2HMIJS,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2HmiJs)
})
_sym_db.RegisterMessage(data2HmiJs)
data2PlcPb = _reflection.GeneratedProtocolMessageType('data2PlcPb', (_message.Message,), {
'DESCRIPTOR' : _DATA2PLCPB,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.data2PlcPb)
})
_sym_db.RegisterMessage(data2PlcPb)
dataAnswer = _reflection.GeneratedProtocolMessageType('dataAnswer', (_message.Message,), {
'DESCRIPTOR' : _DATAANSWER,
'__module__' : 'v0449gRpc_pb2'
# @@protoc_insertion_point(class_scope:v0449gRpc.dataAnswer)
})
_sym_db.RegisterMessage(dataAnswer)
_V0449GRPCSVC = DESCRIPTOR.services_by_name['v0449gRpcSvc']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DATAREQUEST._serialized_start=30
_DATAREQUEST._serialized_end=57
_DATA2PLC._serialized_start=59
_DATA2PLC._serialized_end=85
_SLAVEREQ2PLC._serialized_start=87
_SLAVEREQ2PLC._serialized_end=118
_DATA2HMI._serialized_start=120
_DATA2HMI._serialized_end=146
_DATA2PLCJS._serialized_start=148
_DATA2PLCJS._serialized_end=175
_DATA2HMIJS._serialized_start=177
_DATA2HMIJS._serialized_end=204
_DATA2PLCPB._serialized_start=206
_DATA2PLCPB._serialized_end=234
_DATAANSWER._serialized_start=236
_DATAANSWER._serialized_end=265
_V0449GRPCSVC._serialized_start=268
_V0449GRPCSVC._serialized_end=415
# @@protoc_insertion_point(module_scope)
| 1.140625 | 1 |
api/resources_portal/test/views/test_search_endpoint.py | AlexsLemonade/resources-portal | 0 | 7124 | import datetime
from django.core.management import call_command
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from resources_portal.management.commands.populate_dev_database import populate_dev_database
from resources_portal.models import Material, Organization, User
class SearchMaterialsEndpointTestCase(APITestCase):
"""
Tests /search/materials operations.
"""
@classmethod
def setUpClass(cls):
super(SearchMaterialsEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
cls.secondary_prof = User.objects.get(username="SecondaryProf")
cls.post_doc = User.objects.get(username="PostDoc")
cls.primary_lab = Organization.objects.get(name="PrimaryLab")
cls.material1 = Material.objects.get(title="Melanoma Reduction Plasmid")
cls.material2 = Material.objects.get(title="Allele Extraction Protocol")
@classmethod
def tearDownClass(cls):
super(SearchMaterialsEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_title_finds_a_given_material(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?search=" + self.material1.title
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = int(response.json()["results"][0]["id"])
self.assertEqual(first_result_id, self.material1.id)
def test_filter_on_organization_retrieves_all_organization_materials(self):
# Archive one material to make sure it goes to the bottom of the list.
archived_material = Material.objects.first()
archived_material.is_archived = True
archived_material.save()
self.client.force_authenticate(user=self.primary_prof)
search_url = (
reverse("search-materials-list")
+ "?organization="
+ self.primary_lab.name
+ "&limit=25"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_json = response.json()
material_count = int(response_json["count"])
# Make sure archived materials are last:
self.assertEqual(response_json["results"][-1]["id"], archived_material.id)
material_titles = []
for material in response_json["results"]:
material_titles.append(material["title"])
self.assertEqual(material_count, len(self.primary_lab.materials.all()))
for title in material_titles:
self.assertTrue(
Material.objects.filter(title=title, organization=self.primary_lab).exists()
)
def test_filter_on_category_retrieves_all_materials_of_a_given_category(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?category=" + "MODEL_ORGANISM"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
self.assertEqual(material_count, len(Material.objects.filter(category="MODEL_ORGANISM")))
for title in material_titles:
self.assertTrue(
Material.objects.filter(title=title, category="MODEL_ORGANISM").exists()
)
def test_filter_on_organisms_retrieves_all_materials_with_one_organism(self):
self.client.force_authenticate(user=self.primary_prof)
# Search with one organism name
search_url = reverse("search-materials-list") + "?organisms=" + "danio rerio"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organism_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
database_organism_count = 0
database_titles = []
for material in Material.objects.all():
if material.organisms:
if "Danio rerio" in material.organisms:
database_organism_count += 1
database_titles.append(material.title)
self.assertEqual(organism_count, database_organism_count)
for title in material_titles:
self.assertTrue(title in database_titles)
def test_filter_on_organisms_retrieves_all_materials_with_multiple_organisms(self):
self.client.force_authenticate(user=self.primary_prof)
        # Search with two organism names
search_url = (
reverse("search-materials-list")
+ "?organisms="
+ "danio rerio"
+ "&organisms="
+ "mus musculus"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organism_count = int(response.json()["count"])
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
database_organism_count = 0
database_titles = []
for material in Material.objects.all():
if material.organisms:
if ("Danio rerio" in material.organisms) or ("Mus musculus" in material.organisms):
database_organism_count += 1
database_titles.append(material.title)
self.assertEqual(organism_count, database_organism_count)
for title in material_titles:
self.assertTrue(title in database_titles)
def test_ordering_on_updated_at_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?ordering=" + "updated_at"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_dates = []
for material in response.json()["results"]:
date = datetime.datetime.strptime(
material["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z"
).date()
material_dates.append(date)
self.assertEqual(material_dates, sorted(material_dates))
def test_combine_search_and_filter_and_ordering_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = (
reverse("search-materials-list")
+ "?search=MODEL_ORGANISM"
+ "ordering=updated_at"
+ "has_pre_print=true"
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_dates = []
material_titles = []
for material in response.json()["results"]:
material_titles.append(material["title"])
date = datetime.datetime.strptime(
material["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z"
).date()
material_dates.append(date)
self.assertEqual(material_dates, sorted(material_dates))
for title in material_titles:
self.assertTrue(
Material.objects.filter(
title=title, category="MODEL_ORGANISM", has_pre_print=True
).exists()
)
def test_facets_return_number_of_materials(self):
self.client.force_authenticate(user=self.primary_prof)
# Search with no params
search_url = reverse("search-materials-list")
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
model_organism_count = int(response.json()["facets"]["category"]["MODEL_ORGANISM"])
self.assertEqual(
model_organism_count, len(Material.objects.filter(category="MODEL_ORGANISM"))
)
# Search for only danio rerio organisms
search_url = reverse("search-materials-list") + "?search=danio rerio"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
model_organism_count = int(response.json()["facets"]["category"]["MODEL_ORGANISM"])
database_count = 0
for material in Material.objects.all():
if material.organisms:
if ("Danio rerio" in material.organisms) and (
material.category == "MODEL_ORGANISM"
):
database_count += 1
self.assertEqual(model_organism_count, database_count)
def test_empty_search_returns_no_results(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-materials-list") + "?search="
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
material_count = int(response.json()["count"])
self.assertEqual(material_count, 0)
class SearchUsersEndpointTestCase(APITestCase):
"""
Tests /search/users operations.
"""
@classmethod
def setUpClass(cls):
super(SearchUsersEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
@classmethod
def tearDownClass(cls):
super(SearchUsersEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_name_returns_given_user(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = (
reverse("search-users-list")
+ "?search="
+ self.primary_prof.first_name
+ " "
+ self.primary_prof.last_name
)
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = response.json()["results"][0]["id"]
self.assertEqual(first_result_id, str(self.primary_prof.id))
def test_order_by_published_name_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-users-list") + "?ordering=published_name"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user_published_names = []
for user in response.json()["results"]:
if user["published_name"]:
user_published_names.append(user["published_name"])
self.assertEqual(user_published_names, sorted(user_published_names))
def test_empty_search_returns_no_results(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-users-list") + "?search="
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
user_count = int(response.json()["count"])
self.assertEqual(user_count, 0)
class SearchOrganizationsEndpointTestCase(APITestCase):
"""
Tests /search/organizations operations.
"""
@classmethod
def setUpClass(cls):
super(SearchOrganizationsEndpointTestCase, cls).setUpClass()
populate_dev_database()
# Put newly created materials in the search index
call_command("search_index", "-f", "--rebuild")
cls.primary_prof = User.objects.get(username="PrimaryProf")
cls.primary_lab = Organization.objects.get(name="PrimaryLab")
@classmethod
def tearDownClass(cls):
super(SearchOrganizationsEndpointTestCase, cls).tearDownClass()
        # Rebuild search index with what's actually in the Django database
call_command("search_index", "-f", "--rebuild")
def test_search_for_organization_name_returns_given_organization(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?search=" + self.primary_lab.name
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_result_id = int(response.json()["results"][0]["id"])
self.assertEqual(first_result_id, self.primary_lab.id)
def test_search_for_owner_attribute_returns_related_organizations(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?search=" + self.primary_prof.email
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organization_count = int(response.json()["count"])
organization_names = []
for org in response.json()["results"]:
organization_names.append(org["name"])
self.assertEqual(
organization_count, len(Organization.objects.filter(owner=self.primary_prof))
)
for name in organization_names:
self.assertTrue(
Organization.objects.filter(name=name, owner=self.primary_prof).exists()
)
def test_ordering_on_updated_at_succeeds(self):
self.client.force_authenticate(user=self.primary_prof)
search_url = reverse("search-organizations-list") + "?ordering=" + "updated_at"
response = self.client.get(search_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
organization_dates = []
for org in response.json()["results"]:
date = datetime.datetime.strptime(org["updated_at"], "%Y-%m-%dT%H:%M:%S.%f%z").date()
organization_dates.append(date)
self.assertEqual(organization_dates, sorted(organization_dates))
| 2.1875 | 2 |
mindarmour/utils/logger.py | hboshnak/mindarmour | 139 | 7125 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Util for log module. """
import logging
_LOGGER = logging.getLogger('MA')
def _find_caller():
"""
Bind findCaller() method, which is used to find the stack frame of the
caller so that we can note the source file name, line number and
function name.
"""
return _LOGGER.findCaller()
class LogUtil:
"""
Logging module.
Raises:
SyntaxError: If create this class.
"""
_instance = None
_logger = None
_extra_fmt = ' [%s] [%s] '
def __init__(self):
raise SyntaxError('can not instance, please use get_instance.')
@staticmethod
def get_instance():
"""
Get instance of class `LogUtil`.
Returns:
Object, instance of class `LogUtil`.
"""
if LogUtil._instance is None:
LogUtil._instance = object.__new__(LogUtil)
LogUtil._logger = _LOGGER
LogUtil._init_logger()
return LogUtil._instance
@staticmethod
def _init_logger():
"""
Initialize logger.
"""
LogUtil._logger.setLevel(logging.WARNING)
log_fmt = '[%(levelname)s] %(name)s(%(process)d:%(thread)d,' \
'%(processName)s):%(asctime)s%(message)s'
log_fmt = logging.Formatter(log_fmt)
# create console handler with a higher log level
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_fmt)
# add the handlers to the logger
LogUtil._logger.handlers = []
LogUtil._logger.addHandler(console_handler)
LogUtil._logger.propagate = False
def set_level(self, level):
"""
Set the logging level of this logger, level must be an integer or a
string. Supported levels are 'NOTSET'(integer: 0), 'ERROR'(integer: 1-40),
'WARNING'('WARN', integer: 1-30), 'INFO'(integer: 1-20) and 'DEBUG'(integer: 1-10).
For example, if logger.set_level('WARNING') or logger.set_level(21), then
logger.warn() and logger.error() in scripts would be printed while running,
while logger.info() or logger.debug() would not be printed.
Args:
level (Union[int, str]): Level of logger.
"""
self._logger.setLevel(level)
def add_handler(self, handler):
"""
Add other handler supported by logging module.
Args:
handler (logging.Handler): Other handler supported by logging module.
Raises:
ValueError: If handler is not an instance of logging.Handler.
"""
if isinstance(handler, logging.Handler):
self._logger.addHandler(handler)
else:
raise ValueError('handler must be an instance of logging.Handler,'
' but got {}'.format(type(handler)))
def debug(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'DEBUG'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.debug(self._extra_fmt + msg, file_info, tag, *args)
def info(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'INFO'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.info(self._extra_fmt + msg, file_info, tag, *args)
def warn(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'WARNING'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.warning(self._extra_fmt + msg, file_info, tag, *args)
def error(self, tag, msg, *args):
"""
Log '[tag] msg % args' with severity 'ERROR'.
Args:
tag (str): Logger tag.
msg (str): Logger message.
args (Any): Auxiliary value.
"""
caller_info = _find_caller()
file_info = ':'.join([caller_info[0], str(caller_info[1])])
self._logger.error(self._extra_fmt + msg, file_info, tag, *args)
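# Illustrative usage sketch (not part of the original module): obtain the
# singleton, raise the level so info() messages are printed, and log with a tag.
if __name__ == '__main__':
    LOGGER = LogUtil.get_instance()
    LOGGER.set_level(logging.INFO)
    LOGGER.info('Demo', 'loaded %d samples', 128)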
| 2.171875 | 2 |
Python/Programming Basics/Simple Calculations/17. Daily Earnings.py | teodoramilcheva/softuni-software-engineering | 0 | 7126 | workdays = float(input())
daily_tips = float(input())
exchange_rate = float(input())
salary = workdays * daily_tips
annual_income = salary * 12 + salary * 2.5
net_income = annual_income - annual_income * 25 / 100
result = net_income / 365 * exchange_rate
print('%.2f' % result)
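# Worked example with illustrative inputs: workdays = 20, daily_tips = 10, exchange_rate = 1.95
# salary = 20 * 10 = 200
# annual_income = 200 * 12 + 200 * 2.5 = 2900
# net_income = 2900 - 2900 * 25 / 100 = 2175
# result = 2175 / 365 * 1.95 ~= 11.62, so the program prints 11.62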
| 3.671875 | 4 |
bert_rerannker_eval.py | satya77/transformer_rankers | 0 | 7127 | <gh_stars>0
from transformer_rankers.trainers import transformer_trainer
from transformer_rankers.datasets import dataset, preprocess_scisumm_ranked
from transformer_rankers.eval import results_analyses_tools
from transformers import BertTokenizer, BertForSequenceClassification
from sacred.observers import FileStorageObserver
from sacred import Experiment
import numpy as np
import torch
import pandas as pd
import argparse
import logging
import sys
ex = Experiment('BERT-ranker experiment')
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.StreamHandler(sys.stdout)
]
)
@ex.main
def run_experiment(args):
args.run_id = str(ex.current_run._id)
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
train, valid, test = preprocess_scisumm_ranked.transform_to_dfs(
args.path_to_ranked_file,args.path_to_ranked_test,args.path_to_ranked_dev)
# Choose the negative candidate sampler
ns_train=None
ns_val=None
# Create the loaders for the datasets, with the respective negative samplers
dataloader = dataset.QueryDocumentDataLoader(train, valid, test,
tokenizer, ns_train, ns_val,
'classification', args.val_batch_size,
args.val_batch_size, 512,
0, args.data_folder + "/scisumm_ranked")
with_ranked_list=True
train_loader, val_loader, test_loader = dataloader.get_pytorch_dataloaders(with_ranked_list)
# Instantiate transformer model to be used
model = BertForSequenceClassification.from_pretrained('bert-base-cased')
model.resize_token_embeddings(len(dataloader.tokenizer))
e = torch.load(args.model_dir)
model.load_state_dict(e)
model.eval()
# Instantiate trainer that handles fitting.
trainer = transformer_trainer.TransformerTrainer(model, train_loader, val_loader, test_loader,
0, "classification", tokenizer,
False, 0,
0 ,0, 0)
# Predict for test
logging.info("Predicting")
preds, labels, doc_ids, all_queries, preds_without_acc = trainer.test()
# res = results_analyses_tools.evaluate_and_aggregate(preds, labels, ['R_10@1',
# 'R_10@2',
# 'R_10@5',
# 'R_2@1',
# 'accuracy_0.3',
# 'accuracy_0.3_upto_1',
# 'precision_0.3',
# 'recall_0.3',
# 'f_score_0.3',
# 'accuracy_0.4',
# 'accuracy_0.4_upto_1',
# 'precision_0.4',
# 'recall_0.4',
# 'f_score_0.4',
# 'accuracy_0.5',
# 'accuracy_0.5_upto_1',
# 'precision_0.5',
# 'recall_0.5',
# 'f_score_0.5'
# ])
# for metric, v in res.items():
# logging.info("Test {} : {:4f}".format(metric, v))
# # Saving predictions and labels to a file
# max_preds_column = max([len(l) for l in preds])
# preds_df = pd.DataFrame(preds, columns=["prediction_" + str(i) for i in range(max_preds_column)])
# preds_df.to_csv(args.output_dir + "/" + args.run_id + "/predictions.csv", index=False)
#
# labels_df = pd.DataFrame(labels, columns=["label_" + str(i) for i in range(max_preds_column)])
# labels_df.to_csv(args.output_dir + "/" + args.run_id + "/labels.csv", index=False)
# # predict on the test set
# preds, labels, doc_ids, all_queries, preds_without_acc = trainer.test()
new_preds=list((np.array(preds_without_acc)> 0.4).astype(int))
d = {'query': all_queries, 'doc_id': doc_ids,'label': new_preds, 'similiarity':preds_without_acc}
df_doc_ids = pd.DataFrame(d)
import pdb
pdb.set_trace()
df_doc_ids = df_doc_ids.groupby('query').agg(list).reset_index()
# df_doc_ids_ones = df_doc_ids[df_doc_ids['label']==1]
# df_doc_ids_ones = df_doc_ids_ones.groupby('query').agg(list).reset_index()
# df_doc_ids_non_ones = df_doc_ids.groupby('query').agg(list).reset_index()
# new_df=[]
# for i,row in df_doc_ids_non_ones.iterrows():
# if all([v == 0 for v in row['label']]):
# highest_value=[x for _, x in sorted(zip(row['similiarity'], row['doc_id']), key=lambda pair: pair[0])]
# highest_value_sim=[x for x in sorted(row['similiarity'])]
#
# row['label'] = [1]
# row[ 'doc_id'] = [highest_value[0]]
# row[ 'similiarity'] = [highest_value_sim[0]]
#
# new_df.append(row)
# result = pd.concat([df_doc_ids,pd.DataFrame(new_df)])
df_doc_ids.to_csv(args.output_dir + "/" + args.run_id + "/doc_ids_test_all_results.csv", index=False, sep='\t')
return trainer.best_ndcg
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data_folder", default=None, type=str, required=True,
help="the folder containing data")
parser.add_argument("--model_dir", default=None, type=str, required=True,
help="the folder that the model is saved in.")
parser.add_argument("--val_batch_size", default=32, type=int, required=False,
help="Validation and test batch size.")
parser.add_argument("--path_to_ranked_file", default=None, type=str, required=False,
help="if there is a ranked file this will be the path to it. ")
parser.add_argument("--path_to_ranked_test", default=None, type=str, required=False,
help="if there is a ranked test file this will be the path to it. ")
parser.add_argument("--path_to_ranked_dev", default=None, type=str, required=False,
help="if there is a ranked test file this will be the path to it. ")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="the folder to output predictions")
args = parser.parse_args()
args.sacred_ex = ex
ex.observers.append(FileStorageObserver(args.output_dir))
ex.add_config({'args': args})
return ex.run()
if __name__ == "__main__":
main() | 2.078125 | 2 |
python/p21.py | tonyfg/project_euler | 0 | 7128 | <gh_stars>0
#Q: Evaluate the sum of all the amicable numbers under 10000.
#A: 31626
def divisor_sum(n):
return sum([i for i in xrange (1, n//2+1) if not n%i])
def sum_amicable(start, end):
sum = 0
for i in xrange(start, end):
tmp = divisor_sum(i)
if i == divisor_sum(tmp) and i != tmp:
sum += i+tmp
return sum/2 #each pair is found twice, so divide by 2 ;)
print sum_amicable(1,10000)
| 3.578125 | 4 |
check.py | Dysoncat/student-services-slas-chat-bot | 0 | 7129 | <filename>check.py
import long_responses as long
# Returns the probability of a message matching the responses that we have
def messageProb(userMessage, recognizedWords, isSingleResponse=False, requiredWords=[]):
messageCertainty = 0
hasRequiredWords = True
# Counts how many words are present in each predefined message
for word in userMessage:
if word in recognizedWords:
messageCertainty += 1
# Calculates the percent of recognized words in a user message
percentage = float(messageCertainty) / float(len(recognizedWords))
# Checks that the required words are in the string
for word in requiredWords:
if word not in userMessage:
hasRequiredWords = False
break
# Must either have the required words, or be a single response
if hasRequiredWords or isSingleResponse:
return int(percentage * 100)
else:
return 0
# Checks all the responses using the probability of the messages
def checkAllMesages(message):
highest_prob_list = {}
ignore_list = {}
def ignoreResponse(bot_response, list_of_words, single_response=False, required_words=[]):
nonlocal ignore_list
ignore_list[bot_response] = messageProb(
message, list_of_words, single_response, required_words)
# Simplifies response creation / adds it to the dict
def response(bot_response, list_of_words, single_response=False, required_words=[]):
nonlocal highest_prob_list
highest_prob_list[bot_response] = messageProb(
message, list_of_words, single_response, required_words)
# Responses -------------------------------------------------------------------------------------------------------
response('Hello!', ['hello', 'hi', 'hey',
'sup', 'heyo'], single_response=True)
response('See you!', ['bye', 'goodbye'], single_response=True)
response('I\'m doing fine, and you?', [
'how', 'are', 'you', 'doing'], required_words=['how', "you"])
response('You\'re welcome!', ['thank', 'thanks'], single_response=True)
response("You can borrow a computer from room 315", ["how", "do", "i", "borrow", "a", "computer"], required_words=["borrow", "computer"])
response("You can apply for a new locker key in room 310", ["how", "can", "i", "apply", "for", "a", "new", "locker", "key"], ["new", "locker", "key"])
response("The guidance office is on the third floor", [
"where", "is", "the", "guidance", "office"], required_words=["guidance", "office"])
response("You can apply for the ID in room 310", [
"how", "can", "i", "get", "new", "id"], ["new", "id"])
response("A student ID costs 25 RMB, and it has to be in cash", [
"how", "much", "does", "a", "new", "id", "cost"], ["id", "cost"])
response("The secondary computer classroom is on the fifth floor, and is number 521", [
"where", "is", "the", "secondary", "computer", "classroom"], ["secondary", "computer"])
response("Don't worry about it.", ["sorry", "sry"], ["sorry", "sry"])
# Ignored Responses
ignoreResponse("Good to hear", [
"i", "doing", "good", "fine", "ok"], required_words=["i", "good"])
best_ignore_match = max(ignore_list, key=ignore_list.get)
# Longer responses
response(long.R_ADVICE, ['give', 'advice'], required_words=['advice'])
response(long.R_EATING, ['what', 'you', 'eat'],
required_words=['you', 'eat'])
response(long.R_SWEARING, [
"fuck", "shit", "motherfucker", "fuck", "you"])
best_match = max(highest_prob_list, key=highest_prob_list.get)
# DEBUGGING TOOLS IF NEEDED
print(highest_prob_list)
print("")
print(
f'Best match = {best_match} | Score: {highest_prob_list[best_match]}')
if highest_prob_list[best_match] < ignore_list[best_ignore_match]:
return best_ignore_match
elif highest_prob_list[best_match] < 1:
return long.unknown()
else:
return best_match
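# Illustrative usage (not part of the original file): scoring a tokenized
# message with messageProb. One of the five recognized words matches, so the
# score is int(1/5 * 100) == 20.
if __name__ == "__main__":
    demo_score = messageProb(['hey', 'there'],
                             ['hello', 'hi', 'hey', 'sup', 'heyo'],
                             isSingleResponse=True)
    print(demo_score)  # -> 20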
| 3.046875 | 3 |
image_predictor/utils.py | jdalzatec/streamlit-manizales-tech-talks | 2 | 7130 | <reponame>jdalzatec/streamlit-manizales-tech-talks<filename>image_predictor/utils.py<gh_stars>1-10
from io import StringIO
import numpy as np
from h5py import File
from keras.models import load_model as keras_load_model
from PIL import Image, ImageOps
def predict(image, model):
# Create the array of the right shape to feed into the keras model
# The 'length' or number of images you can put into the array is
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    # Open the image passed in by the caller (a path or file-like object)
image = Image.open(image)
# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array
image_array = np.asarray(image)
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model.predict(data)
return prediction[0]
def read_labels(labels_file):
labels = []
lines = StringIO(labels_file.getvalue().decode()).readlines()
for line in lines:
_, *remaining = line.split()
label = " ".join(remaining).strip()
labels.append(label)
return labels
def load_model(model_file):
return keras_load_model(File(model_file))
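# Illustrative usage sketch (not part of the original module). The file names
# below are assumptions; in the app these objects normally come from Streamlit
# file uploaders, which expose the same getvalue()/file-like interface.
if __name__ == "__main__":
    from io import BytesIO

    with open("labels.txt", "rb") as f:          # Teachable Machine style labels file
        labels = read_labels(BytesIO(f.read()))
    with open("keras_model.h5", "rb") as f:      # exported Keras model
        model = load_model(BytesIO(f.read()))
    scores = predict("example.jpg", model)       # per-class probabilities
    print(dict(zip(labels, scores)))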
| 3.15625 | 3 |
client/setup.py | emilywoods/docker-workshop | 1 | 7131 | <filename>client/setup.py<gh_stars>1-10
from setuptools import setup
setup(
name="workshop-client",
install_requires=["flask==1.1.1", "requests==2.22.0"],
python_requires=">=3.7",
classifiers=[
"Development Status :: 1 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| 1.382813 | 1 |
tests/facebook/models/test_photo.py | Socian-Ltd/python-facebook-1 | 2 | 7132 | <filename>tests/facebook/models/test_photo.py
import json
import unittest
import pyfacebook.models as models
class PhotoModelTest(unittest.TestCase):
BASE_PATH = "testdata/facebook/models/photos/"
with open(BASE_PATH + 'photo.json', 'rb') as f:
PHOTO_INFO = json.loads(f.read().decode('utf-8'))
def testPhoto(self):
m = models.Photo.new_from_json_dict(self.PHOTO_INFO)
self.assertEqual(m.id, "166370841591183")
self.assertEqual(m.album.id, "108824087345859")
self.assertEqual(len(m.images), 8)
self.assertEqual(m.webp_images[0].height, 800)
| 2.859375 | 3 |
airbyte-integrations/connectors/source-scaffold-source-python/source_scaffold_source_python/source.py | curanaj/airbyte-dbt-demo | 0 | 7133 | # MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from datetime import datetime
from typing import Dict, Generator
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
Status,
Type,
)
from airbyte_cdk.sources import Source
class SourceScaffoldSourcePython(Source):
def check(self, logger: AirbyteLogger, config: json) -> AirbyteConnectionStatus:
"""
Tests if the input configuration can be used to successfully connect to the integration
e.g: if a provided Stripe API token can be used to connect to the Stripe API.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteConnectionStatus indicating a Success or Failure
"""
try:
# Not Implemented
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {str(e)}")
def discover(self, logger: AirbyteLogger, config: json) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in this integration.
For example, given valid credentials to a Postgres database,
returns an Airbyte catalog where each postgres table is a stream, and each table column is a field.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:return: AirbyteCatalog is an object describing a list of all available streams in this source.
A stream is an AirbyteStream object that includes:
- its stream name (or table name in the case of Postgres)
- json_schema providing the specifications of expected schema for this stream (a list of columns described
by their names and types)
"""
streams = []
stream_name = "TableName" # Example
json_schema = { # Example
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {"columnName": {"type": "string"}},
}
# Not Implemented
streams.append(AirbyteStream(name=stream_name, json_schema=json_schema))
return AirbyteCatalog(streams=streams)
def read(
self, logger: AirbyteLogger, config: json, catalog: ConfiguredAirbyteCatalog, state: Dict[str, any]
) -> Generator[AirbyteMessage, None, None]:
"""
Returns a generator of the AirbyteMessages generated by reading the source with the given configuration,
catalog, and state.
:param logger: Logging object to display debug/info/error to the logs
(logs will not be accessible via airbyte UI if they are not passed to this logger)
:param config: Json object containing the configuration of this source, content of this json is as specified in
the properties of the spec.json file
:param catalog: The input catalog is a ConfiguredAirbyteCatalog which is almost the same as AirbyteCatalog
returned by discover(), but
        in addition, it has been configured in the UI! For each particular stream and field, extra modifications
        may have been provided, such as filtering streams and/or columns out, renaming some entities, etc.
:param state: When a Airbyte reads data from a source, it might need to keep a checkpoint cursor to resume
replication in the future from that saved checkpoint.
        This is the object that is provided with state from previous runs and avoids replicating the entire set of
        data every time.
:return: A generator that produces a stream of AirbyteRecordMessage contained in AirbyteMessage object.
"""
stream_name = "TableName" # Example
data = {"columnName": "Hello World"} # Example
# Not Implemented
yield AirbyteMessage(
type=Type.RECORD,
record=AirbyteRecordMessage(stream=stream_name, data=data, emitted_at=int(datetime.now().timestamp()) * 1000),
)
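# Illustrative sketch (not part of the generated scaffold): exercising the
# stubbed connector directly. Passing an empty config is an assumption that
# only works here because check() and discover() above ignore their inputs.
if __name__ == "__main__":
    demo_source = SourceScaffoldSourcePython()
    demo_logger = AirbyteLogger()
    print(demo_source.check(demo_logger, {}))     # AirbyteConnectionStatus with SUCCEEDED
    print(demo_source.discover(demo_logger, {}))  # AirbyteCatalog with the example stream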
| 1.632813 | 2 |
alerter/src/monitorables/nodes/chainlink_node.py | SimplyVC/panic | 41 | 7134 | from datetime import datetime
from typing import Optional, Dict, List, Union
from schema import Schema, Or
from src.monitorables.nodes.node import Node
from src.utils.exceptions import InvalidDictSchemaException
class ChainlinkNode(Node):
def __init__(self, node_name: str, node_id: str, parent_id: str) -> None:
super().__init__(node_name, node_id, parent_id)
# Metrics
self._went_down_at_prometheus = None
self._current_height = None
self._total_block_headers_received = None
self._max_pending_tx_delay = None
self._process_start_time_seconds = None
self._total_gas_bumps = None
self._total_gas_bumps_exceeds_limit = None
self._no_of_unconfirmed_txs = None
self._total_errored_job_runs = None
self._current_gas_price_info = {
'percentile': None,
'price': None,
}
self._eth_balance_info = {}
# This variable stores the url of the source used to get prometheus node
# data. Note that this had to be done because multiple prometheus
# sources can be associated with the same node, where at the same time
# only one source is available, and sources switch from time to time.
self._last_prometheus_source_used = None
# This stores the timestamp of the last successful monitoring round.
self._last_monitored_prometheus = None
@property
def is_down_prometheus(self) -> bool:
return self._went_down_at_prometheus is not None
@property
def went_down_at_prometheus(self) -> Optional[float]:
return self._went_down_at_prometheus
@property
def current_height(self) -> Optional[int]:
return self._current_height
@property
def total_block_headers_received(self) -> Optional[int]:
return self._total_block_headers_received
@property
def max_pending_tx_delay(self) -> Optional[int]:
return self._max_pending_tx_delay
@property
def process_start_time_seconds(self) -> Optional[float]:
return self._process_start_time_seconds
@property
def total_gas_bumps(self) -> Optional[int]:
return self._total_gas_bumps
@property
def total_gas_bumps_exceeds_limit(self) -> Optional[int]:
return self._total_gas_bumps_exceeds_limit
@property
def no_of_unconfirmed_txs(self) -> Optional[int]:
return self._no_of_unconfirmed_txs
@property
def total_errored_job_runs(self) -> Optional[int]:
return self._total_errored_job_runs
@property
def current_gas_price_info(self) -> Dict[str, Optional[float]]:
return self._current_gas_price_info
@property
def eth_balance_info(self) -> Dict[str, Union[str, float]]:
return self._eth_balance_info
@property
def last_prometheus_source_used(self) -> Optional[str]:
return self._last_prometheus_source_used
@property
def last_monitored_prometheus(self) -> Optional[float]:
return self._last_monitored_prometheus
@staticmethod
def get_int_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing integer prometheus
: metrics.
"""
return [
'current_height',
'total_block_headers_received',
'max_pending_tx_delay', 'total_gas_bumps',
'total_gas_bumps_exceeds_limit', 'no_of_unconfirmed_txs',
'total_errored_job_runs'
]
@staticmethod
def get_float_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing float prometheus
: metrics.
"""
return [
'went_down_at_prometheus', 'process_start_time_seconds',
'last_monitored_prometheus'
]
@staticmethod
def get_dict_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing dict prometheus
: metrics.
"""
return ['current_gas_price_info', 'eth_balance_info']
@staticmethod
def get_str_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing string prometheus
: metrics.
"""
return ['last_prometheus_source_used']
def get_all_prometheus_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing prometheus metrics
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [
*str_prometheus_metric_attributes,
*int_prometheus_metric_attributes,
*float_prometheus_metric_attributes,
*dict_prometheus_metric_attributes
]
def get_int_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing int metrics.
"""
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
return [*int_prometheus_metric_attributes]
def get_float_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing float metrics.
"""
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
return [*float_prometheus_metric_attributes]
def get_dict_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing dict metrics.
"""
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [*dict_prometheus_metric_attributes]
def get_str_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing str metrics.
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
return [*str_prometheus_metric_attributes]
def get_all_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing metrics
"""
prometheus_metric_attributes = \
self.get_all_prometheus_metric_attributes()
return [*prometheus_metric_attributes]
def set_went_down_at_prometheus(
self, went_down_at_prometheus: Optional[float]) -> None:
self._went_down_at_prometheus = went_down_at_prometheus
def set_prometheus_as_down(self, downtime: Optional[float]) -> None:
"""
This function sets the node's prometheus interface as down. It sets the
time that the interface was initially down to the parameter 'downtime'
if it is not None, otherwise it sets it to the current timestamp.
:param downtime:
:return:
"""
if downtime is None:
self.set_went_down_at_prometheus(datetime.now().timestamp())
else:
self.set_went_down_at_prometheus(downtime)
def set_prometheus_as_up(self) -> None:
"""
This function sets a node's prometheus interface as up. A node's
interface is said to be up if went_down_at_prometheus is None.
:return: None
"""
self.set_went_down_at_prometheus(None)
def set_current_height(self, new_height: Optional[int]) -> None:
self._current_height = new_height
def set_total_block_headers_received(
self, new_total_block_headers_received: Optional[int]) -> None:
self._total_block_headers_received = new_total_block_headers_received
def set_max_pending_tx_delay(
self, new_max_pending_tx_delay: Optional[int]) -> None:
self._max_pending_tx_delay = new_max_pending_tx_delay
def set_process_start_time_seconds(
self, new_process_start_time_seconds: Optional[float]) -> None:
self._process_start_time_seconds = new_process_start_time_seconds
def set_total_gas_bumps(self, new_total_gas_bumps: Optional[int]) -> None:
self._total_gas_bumps = new_total_gas_bumps
def set_total_gas_bumps_exceeds_limit(
self, new_total_gas_bumps_exceeds_limit: Optional[int]) -> None:
self._total_gas_bumps_exceeds_limit = new_total_gas_bumps_exceeds_limit
def set_no_of_unconfirmed_txs(
self, new_no_of_unconfirmed_txs: Optional[int]) -> None:
self._no_of_unconfirmed_txs = new_no_of_unconfirmed_txs
def set_total_errored_job_runs(
self, new_total_errored_job_runs: Optional[int]) -> None:
self._total_errored_job_runs = new_total_errored_job_runs
def set_current_gas_price_info(self, new_percentile: Optional[float],
new_price: Optional[float]) -> None:
"""
This method sets the current_gas_price_info dict based on the new
percentile and price. This is done in this way to protect the Dict
schema.
:param new_percentile: The new percentile to be stored
        :param new_price: The new gas price to be stored
:return: None
"""
self._current_gas_price_info['percentile'] = new_percentile
self._current_gas_price_info['price'] = new_price
@staticmethod
def _new_eth_balance_info_valid(new_eth_balance_info: Dict) -> bool:
"""
This method checks that the new eth_balance_info dict obeys the required
schema.
:param new_eth_balance_info: The dict to check
:return: True if the dict obeys the required schema
: False otherwise
"""
schema = Schema(Or({
'address': str,
'balance': float,
'latest_usage': float,
}, {}))
return schema.is_valid(new_eth_balance_info)
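    # Illustrative example (not from the original source) of a dict that passes
    # the schema above: {'address': '0xabc123', 'balance': 26.3,
    # 'latest_usage': 0.005}; the empty dict {} is also accepted.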
def set_eth_balance_info(
self, new_eth_balance_info: Dict[str, Union[str, float]]) -> None:
"""
This method sets the new_eth_balance_info. It first checks that the new
dict obeys the required schema. If not, an InvalidDictSchemaException is
raised.
:param new_eth_balance_info: The new eth_balance_info to store.
        :return: None
        """
if self._new_eth_balance_info_valid(new_eth_balance_info):
self._eth_balance_info = new_eth_balance_info
else:
raise InvalidDictSchemaException('new_eth_balance_info')
def set_last_prometheus_source_used(
self, new_last_prometheus_source_used: Optional[str]) -> None:
self._last_prometheus_source_used = new_last_prometheus_source_used
def set_last_monitored_prometheus(
self, new_last_monitored_prometheus: Optional[float]) -> None:
self._last_monitored_prometheus = new_last_monitored_prometheus
def reset(self) -> None:
"""
This method resets all metrics to their initial state
:return: None
"""
self.set_went_down_at_prometheus(None)
self.set_current_height(None)
self.set_total_block_headers_received(None)
self.set_max_pending_tx_delay(None)
self.set_process_start_time_seconds(None)
self.set_total_gas_bumps(None)
self.set_total_gas_bumps_exceeds_limit(None)
self.set_no_of_unconfirmed_txs(None)
self.set_total_errored_job_runs(None)
self.set_current_gas_price_info(None, None)
self.set_eth_balance_info({})
self.set_last_prometheus_source_used(None)
self.set_last_monitored_prometheus(None)
| 2.421875 | 2 |
experiments/vgg16/VGG16_utils.py | petrapoklukar/DCA | 2 | 7135 | import pickle
import numpy as np
import os
def _analyze_query_point_assignment(
query_data_dict: dict,
init_Rdata_dict: dict,
init_Edata_dict: dict,
num_R: int,
query_point_assignment_array: np.ndarray,
root: str,
n_points_to_copy=50,
):
"""
Analyzes and visualizes qDCA results.
:param query_data_dict: raw query data.
:param init_Rdata_dict: raw R data.
:param init_Edata_dict: raw E data.
:param num_R: total number of R points.
:param query_point_assignment_array: query point assignments results.
:param root: root directory of the experiment.
:param n_points_to_copy: number of images to save.
:return: accuracy of qDCA assignments; list of (R, query) points with same label;
list of (R, query) points with different label
"""
true_query_data_labels = query_data_dict["labels"]
assigned_R = query_point_assignment_array[
query_point_assignment_array[:, 1] < num_R, 1
]
assigned_E = query_point_assignment_array[
query_point_assignment_array[:, 1] >= num_R, 1
]
assigned_R_labels = init_Rdata_dict["labels"][assigned_R]
assigned_E_labels = init_Edata_dict["labels"][assigned_E - num_R]
assigned_query_data_labels = np.empty(
shape=query_point_assignment_array.shape[0]
).astype(np.int32)
assigned_query_data_labels[
query_point_assignment_array[:, 1] < num_R
] = assigned_R_labels
assigned_query_data_labels[
query_point_assignment_array[:, 1] >= num_R
] = assigned_E_labels
accuracy = (
true_query_data_labels == assigned_query_data_labels
).sum() / assigned_query_data_labels.shape[0]
same_label_idx = np.where(true_query_data_labels == assigned_query_data_labels)[0]
wrong_label_idx = np.where(true_query_data_labels != assigned_query_data_labels)[0]
correct_pairs = []
for i in query_point_assignment_array[same_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
wrong_pairs = []
for i in query_point_assignment_array[wrong_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
with open(
os.path.join(root, "logs", "analyzed_query_point_assignments.pkl"), "wb"
) as f:
pickle.dump(
{
"accuracy": accuracy,
"same_label_idx": same_label_idx,
"wrong_label_idx": wrong_label_idx,
"correct_pairs": correct_pairs,
"wrong_pairs": wrong_pairs,
"query_point_assignment_array": query_point_assignment_array,
},
f,
)
same_label_image_path = os.path.join(root, "visualization", "same_label_images")
wrong_label_image_path = os.path.join(root, "visualization", "wrong_label_images")
if not os.path.exists(wrong_label_image_path):
os.mkdir(wrong_label_image_path)
if not os.path.exists(same_label_image_path):
os.mkdir(same_label_image_path)
for i in range(n_points_to_copy):
query_image_path, init_image_path, query_label, init_label = correct_pairs[i]
path_to_copy = os.path.join(
same_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(init_image_path, path_to_copy))
path_to_copy2 = os.path.join(
same_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(query_image_path, path_to_copy2))
(
w_query_image_path,
w_init_image_path,
w_query_label,
w_init_label,
) = wrong_pairs[i]
path_to_copy_w = os.path.join(
wrong_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(w_query_label), str(w_init_label)
),
)
os.system("cp {0} {1}".format(w_init_image_path, path_to_copy_w))
path_to_copy_w2 = os.path.join(
wrong_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
i, w_query_label, w_init_label
),
)
os.system("cp {0} {1}".format(w_query_image_path, path_to_copy_w2))
return accuracy, correct_pairs, wrong_pairs
def _generate_query_sets(version: str, N: int = 5000):
"""
Generates query sets for qDCA experiment in Section 4.3.
:param version: either version1 (dogs vs kitchen utils) or version2 (random).
:param N: number of points to sample for R used in DCA.
"""
with open(f"representations/vgg16/{version}/Rfeatures.pkl", "rb") as f:
Rdata_v1 = pickle.load(f)
with open(f"representations/vgg16/{version}/Efeatures.pkl", "rb") as f:
Edata_v1 = pickle.load(f)
init_Ridxs = np.random.choice(
np.arange(len(Rdata_v1["feat_lin1"])), size=N, replace=False
)
query_Ridxs = np.setdiff1d(np.arange(len(Rdata_v1["feat_lin1"])), init_Ridxs)
init_Eidxs = np.random.choice(
np.arange(len(Edata_v1["feat_lin1"])), size=N, replace=False
)
query_Eidxs = np.setdiff1d(np.arange(len(Edata_v1["feat_lin1"])), init_Eidxs)
with open(f"representations/vgg16/{version}/sampled_Rfeatures.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": Rdata_v1["feat_lin1"][init_Ridxs],
"feat_lin2": Rdata_v1["feat_lin2"][init_Ridxs],
"labels": Rdata_v1["labels"][init_Ridxs],
"paths": np.array(Rdata_v1["paths"])[init_Ridxs],
"init_Ridx": init_Ridxs,
"query_Ridx": query_Ridxs,
},
f,
)
with open(f"representations/vgg16/{version}/sampled_Efeatures.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": Edata_v1["feat_lin1"][init_Eidxs],
"feat_lin2": Edata_v1["feat_lin2"][init_Eidxs],
"labels": Edata_v1["labels"][init_Eidxs],
"paths": np.array(Edata_v1["paths"])[init_Eidxs],
"init_Eidx": init_Eidxs,
"query_Eidx": query_Eidxs,
},
f,
)
with open(f"representations/vgg16/{version}/query_features.pkl", "wb") as f:
pickle.dump(
{
"feat_lin1": np.concatenate(
[
Rdata_v1["feat_lin1"][query_Ridxs],
Edata_v1["feat_lin1"][query_Eidxs],
]
),
"feat_lin2": np.concatenate(
[
Rdata_v1["feat_lin2"][query_Ridxs],
Edata_v1["feat_lin2"][query_Eidxs],
]
),
"labels": np.concatenate(
[Rdata_v1["labels"][query_Ridxs], Edata_v1["labels"][query_Eidxs]]
),
"paths": np.concatenate(
[
np.array(Rdata_v1["paths"])[query_Ridxs],
np.array(Edata_v1["paths"])[query_Eidxs],
]
),
"init_Eidxs": init_Eidxs,
"query_Eidxs": query_Eidxs,
"init_Ridxs": init_Ridxs,
"query_Ridxs": query_Ridxs,
},
f,
)
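# Minimal driver sketch (not in the original file): it assumes the
# representations/vgg16/<version>/Rfeatures.pkl and Efeatures.pkl files
# referenced above already exist for the chosen version.
if __name__ == "__main__":
    _generate_query_sets("version1", N=5000)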
| 2.609375 | 3 |
back-end/RawFishSheep/app_cart/views.py | Coldarra/RawFishSheep | 0 | 7136 | <reponame>Coldarra/RawFishSheep
from .models import *
from decorator import *
from app_goods.views import getGoodsByID
# Query all cart records belonging to the given user
def getCartByUser(user_id=None):
if user_id == None:
raise ParamException()
return Cart.objects.filter(user_id=user_id)
def getSelectedCart(user_id=None):
if user_id == None:
raise ParamException()
return Cart.objects.filter(user_id=user_id, selection="1")
def getCartByGoods(user_id=None, goods_id=None):
if None in [user_id, goods_id]:
raise ParamException()
if Cart.objects.filter(user_id=user_id, goods_id=goods_id).count() <= 0:
raise RFSException("40012", "无效购物车商品")
return Cart.objects.get(user_id=user_id, goods_id=goods_id)
def checkCartByGoods(user_id, goods_id):
return Cart.objects.filter(user_id=user_id, goods_id=goods_id).count() > 0
def createCart(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
if checkCartByGoods(user_id, goods_id):
        return appendToCart(user_id, goods_id, amount)
return Cart.objects.create(
user_id=user_id, goods_id=goods_id, amount=amount)
def appendToCart(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
amount = int(amount)
if getGoodsByID(goods_id).remain < amount:
raise RFSException("40013", "商品余辆不足")
if checkCartByGoods(user_id, goods_id):
cart_obj = getCartByGoods(user_id, goods_id)
cart_obj.amount += amount
cart_obj.save()
return cart_obj
else:
return createCart(user_id, goods_id, amount)
def deleteCartByGoods(user_id=None, goods_id=None):
if None in [user_id, goods_id]:
raise ParamException()
Cart.objects.filter(user_id=user_id,
goods_id=goods_id).delete()
def deleteCartByUser(user_id=None):
    if user_id is None:
raise ParamException()
Cart.objects.filter(user_id=user_id).delete()
def deleteSelectedCart(user_id=None):
if user_id == None:
raise ParamException()
Cart.objects.filter(user_id=user_id, selection="1").delete()
def setCartAmount(user_id=None, goods_id=None, amount=None):
if None in [user_id, goods_id, amount]:
raise ParamException()
amount = int(amount)
cart = getCartByGoods(user_id, goods_id)
if amount <= 0:
raise RFSException("40033", "购物车商品数量非法")
cart.amount = amount
cart.save()
return cart
def setCartSelection(user_id=None, goods_id=None, selection=None):
    # Check that the parameters are valid
if None in [user_id, goods_id, selection]:
raise ParamException()
cart = getCartByGoods(user_id, goods_id)
    # Check that the stored selection state is valid
if cart.selection != "0" and cart.selection != "1":
raise RFSException("40033", "状态非法")
    # Update the selection state
cart.selection = selection
cart.save()
return cart
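# Hypothetical usage flow (illustration only, not part of the original views):
# add an item to the cart, adjust its amount, select it, then clear selections.
def _example_cart_flow(user_id, goods_id):
    appendToCart(user_id, goods_id, 2)
    setCartAmount(user_id, goods_id, 3)
    setCartSelection(user_id, goods_id, "1")
    deleteSelectedCart(user_id)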
| 2.265625 | 2 |
extensions/catsum.py | johannesgiorgis/my-timewarrior-extensions | 0 | 7137 | #!/usr/bin/env python3
###############################################################################
#
# Category Summaries
#
#
###############################################################################
import datetime
import io
import json
import logging
import pprint
import sys
from typing import Dict, Any
from dateutil import tz
# set logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create handler
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
# Create formatters and add it to handlers
LOG_FORMAT = "[%(asctime)s - %(levelname)-8s - %(module)s:%(name)s ] %(message)s"
c_format = logging.Formatter(LOG_FORMAT)
c_handler.setFormatter(c_format)
# Add handlers to the logger
logger.addHandler(c_handler)
DATE_FORMAT = "%Y%m%dT%H%M%SZ"
# TODO: Convert to defaultdict
# https://www.accelebrate.com/blog/using-defaultdict-python
# https://stackoverflow.com/questions/9358983/dictionaries-and-default-values
# https://docs.python.org/2/library/collections.html#collections.defaultdict
CATEGORIES: dict = {
"PT": "Personal Time",
"PW": "Planned Work",
"UW": "Unplanned Work",
"OW": "Other Work",
}
def main():
print("~" * 100)
totals = calculate_totals(sys.stdin)
# print(totals)
if not totals:
sys.exit(0)
categories_total = extract_categories(totals)
# All Categories Statistics
category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_category_breakdown = format_category_breakdown(category_percent_breakdown)
display_category_breakdown(formatted_category_breakdown)
# remove personal category
categories_total.pop("Personal Time", None)
work_category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_work_category_breakdown = format_category_breakdown(work_category_percent_breakdown)
display_category_breakdown(formatted_work_category_breakdown)
# formatted_category_breakdown.pop("Personal Time", None)
# formatted
# print(type(formatted_category_breakdown))
# print(formatted_category_breakdown.keys())
def format_seconds(seconds: int) -> str:
"""
Convert seconds to a formatted string
Convert seconds: 3661
To formatted: " 1:01:01"
"""
# print(seconds, type(seconds))
hours = seconds // 3600
minutes = seconds % 3600 // 60
seconds = seconds % 60
return f"{hours:4d}:{minutes:02d}:{seconds:02d}"
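# Quick illustration of the formatting above (defined but never called; values
# are hand-checked and safe to delete).
def _format_seconds_examples() -> None:
    assert format_seconds(3661) == "   1:01:01"
    assert format_seconds(45296) == "  12:34:56"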
def calculate_totals(input_stream: io.TextIOWrapper) -> Dict[str, datetime.timedelta]:
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
# Extract the configuration settings.
header = 1
configuration = dict()
body = ""
for line in input_stream:
if header:
if line == "\n":
header = 0
else:
fields = line.strip().split(": ", 2)
if len(fields) == 2:
configuration[fields[0]] = fields[1]
else:
configuration[fields[0]] = ""
else:
body += line
# Sum the seconds tracked by tag
totals = dict()
untagged = None
j = json.loads(body)
for object in j:
start = datetime.datetime.strptime(object["start"], DATE_FORMAT)
if "end" in object:
end = datetime.datetime.strptime(object["end"], DATE_FORMAT)
else:
end = datetime.datetime.utcnow()
tracked = end - start
if "tags" not in object or object["tags"] == []:
if untagged is None:
untagged = tracked
else:
untagged += tracked
else:
for tag in object["tags"]:
if tag in totals:
totals[tag] += tracked
else:
totals[tag] = tracked
if "temp.report.start" not in configuration:
print("There is no data in the database")
return totals
start_utc = datetime.datetime.strptime(configuration["temp.report.start"], DATE_FORMAT)
start_utc = start_utc.replace(tzinfo=from_zone)
start = start_utc.astimezone(to_zone)
if "temp.report.end" in configuration:
end_utc = datetime.datetime.strptime(configuration["temp.report.end"], DATE_FORMAT)
end_utc = end_utc.replace(tzinfo=from_zone)
end = end_utc.astimezone(to_zone)
else:
end = datetime.datetime.now()
if len(totals) == 0 and untagged is None:
print(f"No data in the range {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
print(f"\nCategory Summary Data for {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
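# Shape of the data this extension receives on stdin from timewarrior: a header
# of "key: value" configuration lines, a blank line, then a JSON array of
# intervals. The sample below is illustrative only.
_SAMPLE_INPUT = """temp.report.start: 20210901T000000Z
temp.report.end: 20210902T000000Z

[{"start": "20210901T090000Z", "end": "20210901T103000Z", "tags": ["PW"]}]
"""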
def extract_categories(totals: Dict[str, datetime.timedelta]) -> Dict[str, datetime.timedelta]:
categories_total = {}
for category, category_full_name in CATEGORIES.items():
categories_total[category_full_name] = totals.get(category, datetime.timedelta(0))
return categories_total
def get_category_percent_breakdown(
category_run_times: Dict[str, datetime.timedelta]
) -> Dict[str, Any]:
logger.debug("Getting category percentage breakdown...")
total_time = sum([run_time.total_seconds() for run_time in category_run_times.values()])
logger.debug(f"Total Time:{total_time}")
category_percentage_breakdown: dict = {}
for category, run_time in category_run_times.items():
category_percent = run_time.total_seconds() / total_time
category_percentage_breakdown[category] = {
"percent": category_percent,
"duration": run_time.total_seconds() / 60,
"run_time": format_seconds(int(run_time.total_seconds())),
}
# add total time statistics
category_percentage_breakdown["Total"] = {
"percent": total_time / total_time,
"duration": total_time / 60,
"run_time": format_seconds(int(total_time)),
}
logger.debug(pprint.pformat(category_percentage_breakdown))
return category_percentage_breakdown
def format_category_breakdown(category_breakdown: dict) -> Dict[str, Any]:
# print(type(category_breakdown))
# pprint.pprint(category_breakdown)
formatted_category_breakdown = {}
for category, category_statistics in category_breakdown.items():
formatted_category_breakdown[category] = {
# convert duration to mins
"duration": round(category_statistics["duration"], 2),
"percent": round(category_statistics["percent"] * 100, 2),
"run_time": category_statistics["run_time"],
}
return formatted_category_breakdown
def display_category_breakdown(category_breakdown: dict, title: str = "Category Breakdown"):
# Determine largest width
max_width = len("Category")
    for category in category_breakdown:
        if len(category) > max_width:
            max_width = len(category)
print_dotted_line()
print(f"\t\t{title.capitalize():>{max_width}}")
print(
f"{'Category':{max_width}}\t"
f"{'Duration':{max_width}}\t"
f"{'Run_Time':>{max_width + 2}}\t"
f"{'Percent':{max_width + 1}}"
)
for category, category_statistics in category_breakdown.items():
print(
f"{category:{max_width}}\t"
f"{category_statistics['duration']:{max_width}}\t"
f"{category_statistics['run_time']:}\t"
f"{category_statistics['percent']}%"
)
print_dotted_line()
def print_dotted_line(width: int = 72):
"""Print a dotted (rather 'dashed') line"""
print("-" * width)
if __name__ == "__main__":
main()
| 2.421875 | 2 |
resources/hotel.py | jnascimentocode/REST-API-COM-PYTHON-E-FLASK | 0 | 7138 | from typing import ParamSpecArgs
from flask_restful import Resource, reqparse
from models.hotel import HotelModel
from flask_jwt_extended import jwt_required
from models.site import SiteModel
from resources.filtros import *
import sqlite3
path_params = reqparse.RequestParser()
path_params.add_argument('cidade', type=str)
path_params.add_argument('estrelas_min', type=float)
path_params.add_argument('estrelas_max', type=float)
path_params.add_argument('diaria_min', type=float)
path_params.add_argument('diaria_max', type=float)
path_params.add_argument('limit', type=float)
path_params.add_argument('offset', type=float)
class Hoteis(Resource):
def get(self):
connection = sqlite3.connect('banco.db')
cursor = connection.cursor()
dados = path_params.parse_args()
dados_validos = {chave:dados[chave] for chave in dados if dados[chave] is not None}
parametros = normalize_path_params(**dados_validos)
if not parametros.get('cidade'):
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_sem_cidade, tupla)
else:
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_com_cidade, tupla)
hoteis = []
for linha in resultado:
hoteis.append({
'hotel_id': linha[0],
'nome': linha[1],
'estrelas': linha[2],
'diaria': linha[3],
'cidade': linha[4],
'site_id': linha[5]
})
return {'hoteis': hoteis}
class Hotel(Resource):
argumentos = reqparse.RequestParser()
argumentos.add_argument('nome', type=str, required=True, help="The field 'nome' cannot be left blank")
argumentos.add_argument('estrelas', type=float, required=True, help="The field 'estrelas' cannot be left blank")
argumentos.add_argument('diaria')
argumentos.add_argument('cidade')
argumentos.add_argument('site_id', type=int, required=True, help="Every hotel needs to be linked with site")
def get(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
return hotel.json()
return {'message': 'Hotel not found.'}, 404
@jwt_required()
def post(self, hotel_id):
if HotelModel.find_hotel(hotel_id):
return {"message": "Hotel id '{}' already exists.".format(hotel_id)}, 400
dados = Hotel.argumentos.parse_args()
hotel = HotelModel(hotel_id, **dados)
if not SiteModel.find_by_id(dados.get('site_id')):
return {'message': 'The hotel must be associated to a valid site id'}, 400
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json()
@jwt_required()
def put(self, hotel_id):
dados = Hotel.argumentos.parse_args()
hotel_encontrado = HotelModel.find_hotel(hotel_id)
if hotel_encontrado:
hotel_encontrado.update_hotel(**dados)
hotel_encontrado.save_hotel()
return hotel_encontrado.json(), 200
hotel = HotelModel(hotel_id, **dados)
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json(), 201 #created
@jwt_required()
def delete(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
try:
hotel.delete_hotel()
except:
return {'message': 'An error occurred trying to delete hotel.'}, 500
return {'message': 'Hotel deleted.'}
return {'message': 'Hotel not found.'}, 404
| 2.484375 | 2 |
src/wormhole/__main__.py | dmgolembiowski/magic-wormhole | 2,801 | 7139 | from __future__ import absolute_import, print_function, unicode_literals
if __name__ == "__main__":
from .cli import cli
cli.wormhole()
else:
# raise ImportError('this module should not be imported')
pass
| 1.601563 | 2 |
testing/berge_equilibrium_cndp.py | Eliezer-Beczi/CNDP | 1 | 7140 | <reponame>Eliezer-Beczi/CNDP<filename>testing/berge_equilibrium_cndp.py
import networkx as nx
import utils.connectivity_metrics as connectivity_metric
from platypus import NSGAII, EpsMOEA, NSGAIII, EpsNSGAII, Problem, Dominance, Subset, TournamentSelector, \
HypervolumeFitnessEvaluator, Archive
import statistics
import multiprocessing as mp
G = nx.read_adjlist("input/Ventresca/BarabasiAlbert_n500m1.txt")
k = 50
num_of_tests = 10
def get_pairwise_connectivity(exclude=None):
if exclude is None:
exclude = {}
S = set(exclude)
subgraph = nx.subgraph_view(G, filter_node=lambda n: n not in S)
return connectivity_metric.pairwise_connectivity(subgraph)
class CNDP(Problem):
def __init__(self):
super(CNDP, self).__init__(1, 1)
self.types[:] = Subset(list(G), k)
def evaluate(self, solution):
solution.objectives[0] = get_pairwise_connectivity(solution.variables[0])
class BergeDominance(Dominance):
def __init__(self):
super(BergeDominance, self).__init__()
def compare(self, x, y):
k1 = 0
k2 = 0
nodes_x = x.variables[0][:]
nodes_y = y.variables[0][:]
metric_x = x.objectives[0]
metric_y = y.objectives[0]
for i in range(k):
tmp = nodes_y[i]
nodes_y[i] = nodes_x[i]
if get_pairwise_connectivity(nodes_y) < metric_x:
k1 += 1
nodes_y[i] = tmp
for i in range(k):
tmp = nodes_x[i]
nodes_x[i] = nodes_y[i]
if get_pairwise_connectivity(nodes_x) < metric_y:
k2 += 1
nodes_x[i] = tmp
if k1 < k2:
return -1
elif k1 > k2:
return 1
else:
return 0
class BergeArchive(Archive):
def __init__(self):
super(BergeArchive, self).__init__(dominance=BergeDominance())
def get_critical_nodes():
algorithm = NSGAII(CNDP(), selector=TournamentSelector(dominance=BergeDominance()), archive=BergeArchive())
algorithm.run(1000)
fitness = algorithm.result[0].objectives[0]
print(fitness)
return fitness
if __name__ == '__main__':
pool = mp.Pool(mp.cpu_count())
samples = pool.starmap_async(get_critical_nodes, [() for _ in range(num_of_tests)]).get()
pool.close()
avg = sum(samples) / len(samples)
stdev = statistics.stdev(samples)
print(f"Average: {avg}")
print(f"Standard Deviation: {stdev}")
| 2.609375 | 3 |
policykit/django_db_logger/migrations/0002_initial.py | mashton/policyk | 78 | 7141 | # Generated by Django 3.2.2 on 2021-09-02 15:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('django_db_logger', '0001_initial'),
('policyengine', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='evaluationlog',
name='community',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='policyengine.community'),
),
migrations.AddField(
model_name='evaluationlog',
name='proposal',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='policyengine.proposal'),
),
]
| 1.539063 | 2 |
Charm/models/risk_functions.py | TanyaAdams1/Charm | 17 | 7142 | <filename>Charm/models/risk_functions.py
import numpy as np
from mcerp import *
from uncertainties.core import AffineScalarFunc
class RiskFunction(object):
def get_risk(self, bar, p):
""" Computes risk for perf array w.r.t. bar.
Args:
bar: reference performance bar.
perfs: performance array-like.
Returns:
single float (mean risk)
"""
if isinstance(p, UncertainFunction):
return self.func(bar, p._mcpts)
elif isinstance(p, AffineScalarFunc):
#TODO: what should we return? How to define risk analytically?
raise ValueError('Risk -- Undefined behavior.')
else:
return self.func(bar, [p])
def get_name(self):
name = type(self).__name__
return name[:name.find('Function')]
class DollarValueFunction(RiskFunction):
def dollar_function(self, bar, perf):
value = .0
for p in perf:
normed_p = float(p)/bar
if normed_p < .6:
value += 100
elif normed_p < .8:
value += 200
elif normed_p < .9:
value += 300
elif normed_p < 1.0:
value += 600
else:
value += 1000
return 1000 - value/len(perf)
def __init__(self):
self.func = self.dollar_function
class StepRiskFunction(RiskFunction):
def step_function(self, bar, perf):
return float(len([p for p in perf if p < bar]))/len(perf)
def __init__(self):
self.func = self.step_function
class LinearRiskFunction(RiskFunction):
def linear_cutoff_function(self, bar, perf):
# risk = a * (perf-bar)
a = 1
risk = []
for p in perf:
base = bar - p
if base > 0:
risk.append(a * base)
return np.mean(risk) if risk else 0
def __init__(self):
self.func = self.linear_cutoff_function
class QuadraticRiskFunction(RiskFunction):
def quadratic_cutoff_function(self, bar, perf):
# risk = a * (perf-bar)**2 + b * (perf-bar) + c
risk = []
a = 4
b = 0
c = 0
for p in perf:
base = (bar - p)/bar
if base > 0:
risk.append(a*base**2 + b*base + c)
return np.mean(risk) if risk else 0
def __init__(self):
self.func = self.quadratic_cutoff_function
class ExponentialRiskFunction(RiskFunction):
def exponential_cutoff_function(self, bar, perf):
# risk = a ** (perf-bar)
risk = []
a = 2.718
for p in perf:
base = (bar - p)/bar
if base > 0:
risk.append(a ** base)
return np.mean(risk) if risk else 0
def __init__(self):
self.func = self.exponential_cutoff_function
class RiskFunctionCollection(object):
funcs = {'step': StepRiskFunction(),
'linear': LinearRiskFunction(),
'quad': QuadraticRiskFunction(),
'exp': ExponentialRiskFunction(),
'dollar': DollarValueFunction()}
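# Minimal usage sketch (assumed workflow, not from the original module): look up
# a risk function by name and evaluate it against a plain performance sample.
if __name__ == "__main__":
    step_risk = RiskFunctionCollection.funcs['step']
    print(step_risk.get_name(), step_risk.get_risk(bar=1.0, p=0.8))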
| 2.515625 | 3 |
code/doubanUtils.py | verazuo/douban_crawler | 1 | 7143 | <gh_stars>1-10
import requests
import re
from bs4 import BeautifulSoup
def nextPageLink(sess,soup,page,head=""):
NextPage=soup.find(class_='next').link.get('href')
req=sess.get(head + NextPage)
    print(f'Page {page}:', req.status_code)
return BeautifulSoup(req.text,'html.parser') | 2.8125 | 3 |
491/491.py | kaixiang1992/python-flask | 0 | 7144 | from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# TODO: db_uri
# dialect+driver://username:password@host:port/database?charset=utf8
DB_URI = 'mysql+pymysql://root:[email protected]:3300/first_sqlalchemy?charset=utf8'
engine = create_engine(DB_URI)
Base = declarative_base(bind=engine)
session = sessionmaker(bind=engine)()
# TODO: define the User model
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(50), nullable=False)
def __repr__(self):
return '<User(id={id}, name={name})>'.format(id=self.id, name=self.name)
# TODO: create the Article model
class Article(Base):
__tablename__ = 'article'
id = Column(Integer, primary_key=True, autoincrement=True)
title = Column(String(50), nullable=False)
    # TODO: foreign key constraint
uid = Column(Integer, ForeignKey('user.id'), nullable=False)
authors = relationship('User', backref='articles')
# TODO: drop the database tables
# Base.metadata.drop_all()
# TODO: create the database tables
# Base.metadata.create_all()
#
# user = User(name='zhiliao')
# article1 = Article(title='python')
# article2 = Article(title='flask')
#
# user.articles.append(article1)
# user.articles.append(article2)
# TODO: commit the data
# session.add(user)
# session.commit()
# TODO: 1. delete with session.delete, without specifying `nullable=False`
# TODO: 2. delete with session.delete, specifying `nullable=False` to prevent the delete
user = session.query(User).first()
print(user)
session.delete(user)
session.commit()
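# Hypothetical helper (not part of the original script): because Article.uid is
# NOT NULL, a user can only be deleted once their articles are removed first.
def delete_user_with_articles(user_id):
    target = session.query(User).get(user_id)
    if target is None:
        return
    for article in target.articles:
        session.delete(article)
    session.delete(target)
    session.commit()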
| 2.984375 | 3 |
spacy/tests/tagger/test_lemmatizer.py | TerminalWitchcraft/spaCy | 1 | 7145 | # coding: utf-8
from __future__ import unicode_literals
from ...lemmatizer import read_index, read_exc
import pytest
@pytest.mark.models
@pytest.mark.parametrize('text,lemmas', [("aardwolves", ["aardwolf"]),
("aardwolf", ["aardwolf"]),
("planets", ["planet"]),
("ring", ["ring"]),
("axes", ["axis", "axe", "ax"])])
def test_tagger_lemmatizer_noun_lemmas(lemmatizer, text, lemmas):
if lemmatizer is None:
return None
assert lemmatizer.noun(text) == set(lemmas)
@pytest.mark.models
def test_tagger_lemmatizer_base_forms(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.noun('dive', {'number': 'sing'}) == set(['dive'])
assert lemmatizer.noun('dive', {'number': 'plur'}) == set(['diva'])
@pytest.mark.models
def test_tagger_lemmatizer_base_form_verb(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.verb('saw', {'verbform': 'past'}) == set(['see'])
@pytest.mark.models
def test_tagger_lemmatizer_punct(lemmatizer):
if lemmatizer is None:
return None
assert lemmatizer.punct('“') == set(['"'])
assert lemmatizer.punct('“') == set(['"'])
@pytest.mark.models
def test_tagger_lemmatizer_read_index(path):
if path is not None:
with (path / 'wordnet' / 'index.noun').open() as file_:
index = read_index(file_)
assert 'man' in index
assert 'plantes' not in index
assert 'plant' in index
@pytest.mark.models
@pytest.mark.parametrize('text,lemma', [("was", "be")])
def test_tagger_lemmatizer_read_exc(path, text, lemma):
if path is not None:
with (path / 'wordnet' / 'verb.exc').open() as file_:
exc = read_exc(file_)
assert exc[text] == (lemma,)
@pytest.mark.models
def test_tagger_lemmatizer_lemma_assignment(EN):
text = "Bananas in pyjamas are geese."
doc = EN.tokenizer(text)
assert all(t.lemma_ == '' for t in doc)
EN.tagger(doc)
assert all(t.lemma_ != '' for t in doc)
| 2.421875 | 2 |
sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py | RAY-316/azure-sdk-for-python | 0 | 7146 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AcquiredPhoneNumbers
from ._models_py3 import CommunicationError
from ._models_py3 import CommunicationErrorResponse
from ._models_py3 import PhoneNumberCapabilities
from ._models_py3 import PhoneNumberCapabilitiesRequest
from ._models_py3 import PhoneNumberCost
from ._models_py3 import PhoneNumberOperation
from ._models_py3 import PhoneNumberPurchaseRequest
from ._models_py3 import PhoneNumberSearchRequest
from ._models_py3 import PhoneNumberSearchResult
from ._models_py3 import PurchasedPhoneNumber
except (SyntaxError, ImportError):
from ._models import AcquiredPhoneNumbers # type: ignore
from ._models import CommunicationError # type: ignore
from ._models import CommunicationErrorResponse # type: ignore
from ._models import PhoneNumberCapabilities # type: ignore
from ._models import PhoneNumberCapabilitiesRequest # type: ignore
from ._models import PhoneNumberCost # type: ignore
from ._models import PhoneNumberOperation # type: ignore
from ._models import PhoneNumberPurchaseRequest # type: ignore
from ._models import PhoneNumberSearchRequest # type: ignore
from ._models import PhoneNumberSearchResult # type: ignore
from ._models import PurchasedPhoneNumber # type: ignore
from ._phone_numbers_client_enums import (
BillingFrequency,
PhoneNumberAssignmentType,
PhoneNumberCapabilityType,
PhoneNumberOperationStatus,
PhoneNumberOperationType,
PhoneNumberType,
)
__all__ = [
'AcquiredPhoneNumbers',
'CommunicationError',
'CommunicationErrorResponse',
'PhoneNumberCapabilities',
'PhoneNumberCapabilitiesRequest',
'PhoneNumberCost',
'PhoneNumberOperation',
'PhoneNumberPurchaseRequest',
'PhoneNumberSearchRequest',
'PhoneNumberSearchResult',
'PurchasedPhoneNumber',
'BillingFrequency',
'PhoneNumberAssignmentType',
'PhoneNumberCapabilityType',
'PhoneNumberOperationStatus',
'PhoneNumberOperationType',
'PhoneNumberType',
]
| 1.46875 | 1 |
AlgoNet2/Helper.py | Bhaney44/AlgorandDevelopment | 0 | 7147 | import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
import matplotlib.pyplot as plt
def visualize_training_results(results):
"""
Plots the loss and accuracy for the training and testing data
"""
history = results.history
plt.figure(figsize=(12,4))
plt.plot(history['val_loss'])
plt.plot(history['loss'])
plt.legend(['val_loss', 'loss'])
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
plt.figure(figsize=(12,4))
plt.plot(history['val_accuracy'])
plt.plot(history['accuracy'])
plt.legend(['val_accuracy', 'accuracy'])
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
def split_sequence(seq, n_steps_in, n_steps_out):
"""
Splits the univariate time sequence
"""
X, y = [], []
for i in range(len(seq)):
end = i + n_steps_in
out_end = end + n_steps_out
if out_end > len(seq):
break
seq_x, seq_y = seq[i:end], seq[end:out_end]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
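# Windowing example (illustrative only, defined but never called): a 6-point
# series with n_steps_in=3 and n_steps_out=2 yields X of shape (2, 3) and y of
# shape (2, 2).
def _split_sequence_example():
    X, y = split_sequence([1, 2, 3, 4, 5, 6], n_steps_in=3, n_steps_out=2)
    return X.shape, y.shape  # ((2, 3), (2, 2))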
def layer_maker(n_layers, n_nodes, activation, drop=None, d_rate=.5):
"""
Create a specified number of hidden layers for an RNN
Optional: Adds regularization option, dropout layer to prevent potential overfitting if necessary
"""
model = Sequential()
# Creating the specified number of hidden layers with the specified number of nodes
for x in range(1,n_layers+1):
model.add(LSTM(n_nodes, activation=activation, return_sequences=True))
# Adds a Dropout layer after every Nth hidden layer (the 'drop' variable)
try:
if x % drop == 0:
model.add(Dropout(d_rate))
except:
            pass

    return model
| 3.453125 | 3 |
apex/contrib/multihead_attn/self_multihead_attn_func.py | Muflhi01/apex | 6,523 | 7148 | import torch
import torch.nn.functional as F
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(
ctx,
use_time_mask,
is_training,
heads,
scale,
inputs,
input_weights,
output_weights,
input_biases,
output_biases,
mask,
is_additive_mask,
dropout_prob,
):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
scale_t = torch.tensor([scale])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
if use_biases_t[0]:
input_lin_results = torch.addmm(
input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
input_lin_results = torch.mm(
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)), input_weights.transpose(0, 1)
)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul1_results = torch.empty(
(queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype, device=torch.device("cuda")
)
matmul1_results = torch.baddbmm(
matmul1_results,
queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results,
beta=0.0,
alpha=scale_t[0],
)
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert len(mask.size()) == 2, "Timing mask is not 2D!"
assert mask.size(0) == mask.size(1), "Sequence length should match!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float("-inf"))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
if is_additive_mask:
matmul1_results = matmul1_results + mask.unsqueeze(1).unsqueeze(2)
else:
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float("-inf"))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1.0 - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
matmul2_results = torch.empty(
(dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype,
device=torch.device("cuda"),
).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
matmul2_results = (
matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1), inputs.size(2))
)
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
if use_biases_t[0]:
outputs = torch.addmm(
output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1.0,
alpha=1.0,
)
else:
outputs = torch.mm(
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)), output_weights.transpose(0, 1)
)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
ctx.save_for_backward(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
(
use_biases_t,
heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
) = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t[0]
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
# Slice out q,k,v from one big set of gradients entering the input linear's bprop (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights
)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)),
)
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
if use_biases_t[0]:
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0
)
else:
output_bias_grads = None
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
# Mask and Scaling for Dropout (not a publically documented op)
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
# Softmax Grad (not a publically documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
queries_grads = torch.baddbmm(
queries_grads.transpose(0, 1),
softmax_grads,
keys.transpose(0, 1),
out=queries_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
keys_grads = torch.baddbmm(
keys_grads.transpose(0, 1),
softmax_grads.transpose(1, 2),
queries.transpose(0, 1),
out=keys_grads.transpose(0, 1),
beta=0.0,
alpha=scale_t[0],
)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(
inputs.size(0) * inputs.size(1), heads_t[0] * 3 * head_dim
)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
input_weight_grads = torch.mm(
input_lin_results_grads.transpose(0, 1), inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2))
)
if use_biases_t[0]:
input_bias_grads = torch.sum(input_lin_results_grads, 0)
else:
input_bias_grads = None
return (
None,
None,
None,
None,
input_grads,
input_weight_grads,
output_weight_grads,
input_bias_grads,
output_bias_grads,
None,
None,
)
self_attn_func = SelfAttnFunc.apply
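# Shape-only usage sketch (not part of the original module): assumes a CUDA
# device and illustrative sizes; biases and masks are omitted (None) and the
# inference path (is_training=False) is used so no dropout state is needed.
if __name__ == "__main__":
    seql, bsz, heads, head_dim = 4, 2, 8, 64
    embed = heads * head_dim
    inputs = torch.randn(seql, bsz, embed, device="cuda")
    input_weights = torch.randn(3 * embed, embed, device="cuda")
    output_weights = torch.randn(embed, embed, device="cuda")
    outputs = self_attn_func(False, False, heads, 1.0 / head_dim ** 0.5,
                             inputs, input_weights, output_weights,
                             None, None, None, False, 0.0)
    print(outputs.shape)  # torch.Size([4, 2, 512])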
| 2.25 | 2 |
api/app/reviews/models.py | NikolaSiplakova/Baobab | 0 | 7149 | <reponame>NikolaSiplakova/Baobab<filename>api/app/reviews/models.py
from datetime import datetime
from app import db
from app.utils import misc
class ReviewForm(db.Model):
id = db.Column(db.Integer(), primary_key=True)
application_form_id = db.Column(db.Integer(), db.ForeignKey('application_form.id'), nullable=False)
is_open = db.Column(db.Boolean(), nullable=False)
deadline = db.Column(db.DateTime(), nullable=False)
application_form = db.relationship('ApplicationForm', foreign_keys=[application_form_id])
review_questions = db.relationship('ReviewQuestion')
def __init__(self, application_form_id, deadline):
self.application_form_id = application_form_id
self.is_open = True
self.deadline = deadline
def close(self):
self.is_open = False
class ReviewQuestion(db.Model):
id = db.Column(db.Integer, primary_key=True)
review_form_id = db.Column(db.Integer(), db.ForeignKey('review_form.id'), nullable=False)
question_id = db.Column(db.Integer(), db.ForeignKey('question.id'), nullable=True)
type = db.Column(db.String(), nullable=False)
is_required = db.Column(db.Boolean(), nullable=False)
order = db.Column(db.Integer(), nullable=False)
weight = db.Column(db.Float(), nullable=False)
review_form = db.relationship('ReviewForm', foreign_keys=[review_form_id])
question = db.relationship('Question', foreign_keys=[question_id])
translations = db.relationship('ReviewQuestionTranslation', lazy='dynamic')
def __init__(self,
review_form_id,
question_id,
type,
is_required,
order,
weight):
self.review_form_id = review_form_id
self.question_id = question_id
self.type = type
self.is_required = is_required
self.order = order
self.weight = weight
def get_translation(self, language):
translation = self.translations.filter_by(language=language).first()
return translation
class ReviewQuestionTranslation(db.Model):
__tablename__ = 'review_question_translation'
__table_args__ = tuple([db.UniqueConstraint('review_question_id', 'language', name='uq_review_question_id_language')])
id = db.Column(db.Integer(), primary_key=True)
review_question_id = db.Column(db.Integer(), db.ForeignKey('review_question.id'), nullable=False)
language = db.Column(db.String(2), nullable=False)
description = db.Column(db.String(), nullable=True)
headline = db.Column(db.String(), nullable=True)
placeholder = db.Column(db.String(), nullable=True)
options = db.Column(db.JSON(), nullable=True)
validation_regex = db.Column(db.String(), nullable=True)
validation_text = db.Column(db.String(), nullable=True)
def __init__(self,
review_question_id,
language,
description=None,
headline=None,
placeholder=None,
options=None,
validation_regex=None,
validation_text=None):
self.review_question_id = review_question_id
self.language = language
self.description = description
self.headline = headline
self.placeholder = placeholder
self.options = options
self.validation_regex = validation_regex
self.validation_text = validation_text
class ReviewResponse(db.Model):
id = db.Column(db.Integer(), primary_key=True)
review_form_id = db.Column(db.Integer(), db.ForeignKey('review_form.id'), nullable=False)
reviewer_user_id = db.Column(db.Integer(), db.ForeignKey('app_user.id'), nullable=False)
response_id = db.Column(db.Integer(), db.ForeignKey('response.id'), nullable=False)
    language = db.Column(db.String(2), nullable=False)
    is_submitted = db.Column(db.Boolean(), nullable=False)
    submitted_timestamp = db.Column(db.DateTime(), nullable=True)
review_form = db.relationship('ReviewForm', foreign_keys=[review_form_id])
reviewer_user = db.relationship('AppUser', foreign_keys=[reviewer_user_id])
response = db.relationship('Response', foreign_keys=[response_id])
review_scores = db.relationship('ReviewScore')
def __init__(self,
review_form_id,
reviewer_user_id,
response_id,
language):
self.review_form_id = review_form_id
self.reviewer_user_id = reviewer_user_id
self.response_id = response_id
self.language = language
self.is_submitted = False
def submit(self):
self.is_submitted = True
self.submitted_timestamp = datetime.now()
def calculate_score(self):
return sum([
misc.try_parse_float(score.value) * score.review_question.weight for score in self.review_scores
if score.review_question.weight > 0
])
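    # Worked example of the weighting above (hypothetical values): answers "4"
    # and "3" on questions weighted 2.0 and 0.5 give 4*2.0 + 3*0.5 = 9.5, and
    # questions with non-positive weight are skipped entirely.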
class ReviewScore(db.Model):
id = db.Column(db.Integer(), primary_key=True)
review_response_id = db.Column(db.Integer(), db.ForeignKey('review_response.id'), nullable=False)
review_question_id = db.Column(db.Integer(), db.ForeignKey('review_question.id'), nullable=False)
value = db.Column(db.String(), nullable=False)
review_response = db.relationship('ReviewResponse', foreign_keys=[review_response_id])
review_question = db.relationship('ReviewQuestion', foreign_keys=[review_question_id])
def __init__(self,
review_question_id,
value):
self.review_question_id = review_question_id
self.value = value
class ReviewConfiguration(db.Model):
id = db.Column(db.Integer(), primary_key=True)
review_form_id = db.Column(db.Integer(), db.ForeignKey('review_form.id'), nullable=False)
num_reviews_required = db.Column(db.Integer(), nullable=False)
num_optional_reviews = db.Column(db.Integer(), nullable=False)
drop_optional_question_id = db.Column(db.Integer(), db.ForeignKey('review_question.id'), nullable=True)
drop_optional_agreement_values = db.Column(db.String(), nullable=True)
review_form = db.relationship('ReviewForm', foreign_keys=[review_form_id])
review_question = db.relationship('ReviewQuestion', foreign_keys=[drop_optional_question_id])
| 2.4375 | 2 |
speednet/vae/ConvVae.py | Abhranta/speednet | 1 | 7150 | <filename>speednet/vae/ConvVae.py
import torch.nn as nn
import torch
from utils import Flatten , Unflatten , weights_init , down_conv , up_conv
class Net(nn.Module):
def __init__(self , num_layers , img_dim , in_chan , act_func , latent_vector_size):
super(Net , self).__init__()
assert act_func in ("ReLU" , "LeakyReLU") , "Activation function that can be used now are ReLU and LeakyReLU"
        assert img_dim // (2**num_layers) > 0 , "Latent vector driven to 0, please increase image size or decrease number of layers"
self.act_func = act_func
self.in_chan = in_chan
self.num_layers = num_layers
self.latent_vector_size = latent_vector_size
self.in_chan2 = self.in_chan
self.encoder_net_layers = []
self.decoder_net_layers = []
self.out_chan = 2**5
for _ in range(num_layers):
self.encoder_net_layers.append(down_conv(self.in_chan , self.act_func , self.out_chan))
self.in_chan = self.out_chan*2
self.out_chan = self.out_chan*4
self.encoder = nn.Sequential(*self.encoder_net_layers ,
Flatten() ,
nn.Linear(((self.out_chan//2)*((img_dim//(2 ** num_layers))**2)) , self.latent_vector_size*4) ,
nn.ReLU(),
nn.Linear(self.latent_vector_size*4 , self.latent_vector_size*2) ,
nn.ReLU()
)
self.mu = nn.Linear(self.latent_vector_size*2 , self.latent_vector_size)
self.logvar = nn.Linear(self.latent_vector_size*2 , self.latent_vector_size)
self.out_chan2 = self.out_chan
for _ in range(num_layers):
self.decoder_net_layers.append(up_conv(self.out_chan2//2 , self.act_func , self.out_chan2//4))
self.out_chan2 = self.out_chan2//4
self.decoder = nn.Sequential(nn.Linear(self.latent_vector_size , self.latent_vector_size*4) ,
nn.ReLU() ,
nn.Linear(self.latent_vector_size*4 , ((self.out_chan//2)*((img_dim//(2 ** num_layers))**2))) ,
nn.ReLU() ,
Unflatten(self.out_chan//2 , (img_dim//(2 ** num_layers)) , (img_dim//(2 ** num_layers)) ) ,
*self.decoder_net_layers ,
nn.ConvTranspose2d(self.out_chan2//2 , self.in_chan2 , 3 , 1 , 1))
def encode(self , input_tensor):
encoded_vector = self.encoder(input_tensor)
mu , logvar = self.mu(encoded_vector) , self.logvar(encoded_vector)
return mu , logvar
def reparameterize(self , mu , logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
latent = mu + std*eps
return latent
def decode(self , latent):
decoded_vector = self.decoder(latent)
return decoded_vector
def forward(self , input_tensor):
mu , logvar = self.encode(input_tensor)
latent_space = self.reparameterize(mu , logvar)
return self.decode(latent_space) , mu , logvar | 2.546875 | 3 |
nelpy/utils.py | IsaacBusaleh/nelpy | 1 | 7151 | """This module contains helper functions and utilities for nelpy."""
__all__ = ['spatial_information',
'frange',
'swap_cols',
'swap_rows',
'pairwise',
'is_sorted',
'linear_merge',
'PrettyDuration',
'ddt_asa',
'get_contiguous_segments',
'get_events_boundaries',
'get_threshold_crossing_epochs',
'_bst_get_bins']
import numpy as np
import logging
from itertools import tee, repeat
from collections import namedtuple
from math import floor
from scipy.signal import hilbert
import scipy.ndimage.filters #import gaussian_filter1d, gaussian_filter
from numpy import log, ceil
import copy
import sys
import ctypes
from multiprocessing import Array, cpu_count
from multiprocessing.pool import Pool
import pdb
from . import core # so that core.RegularlySampledAnalogSignalArray is exposed
from . import auxiliary # so that auxiliary.TuningCurve1D is exposed
from . import filtering
from .utils_.decorators import keyword_deprecation
# def sub2ind(array_shape, rows, cols):
# ind = rows*array_shape[1] + cols
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# return ind
# def ind2sub(array_shape, ind):
# # see also np.unravel_index(ind, array.shape)
# ind[ind < 0] = -1
# ind[ind >= array_shape[0]*array_shape[1]] = -1
# rows = (ind.astype('int') / array_shape[1])
# cols = ind % array_shape[1]
# return (rows, cols)
def ragged_array(arr):
"""Takes a list of arrays, and returns a ragged array.
See https://github.com/numpy/numpy/issues/12468
"""
n_elem = len(arr)
out = np.array(n_elem*[None])
for ii in range(out.shape[0]):
out[ii] = arr[ii]
return out
def asa_indices_within_epochs(asa, intervalarray):
"""Return indices of ASA within epochs.
[[start, stop]
...
[start, stop]]
so that data can be associated with asa._data[:,start:stop] for each epoch.
"""
indices = []
intervalarray = intervalarray[asa.support]
for interval in intervalarray.merge().data:
a_start = interval[0]
a_stop = interval[1]
frm, to = np.searchsorted(asa._abscissa_vals, (a_start, a_stop))
indices.append((frm, to))
indices = np.array(indices, ndmin=2)
return indices
def frange(start, stop, step):
"""arange with floating point step"""
# TODO: this function is not very general; we can extend it to work
# for reverse (stop < start), empty, and default args, etc.
# there are also many edge cases where this is weird.
# see https://stackoverflow.com/questions/7267226/range-for-floats
# for better alternatives.
num_steps = int(np.floor((stop-start)/step))
return np.linspace(start, stop, num=num_steps, endpoint=False)
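# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
# frange behaves like an endpoint-exclusive arange built on np.linspace.
def _example_frange():
    """E.g. frange(0, 1, 0.25) -> array([0., 0.25, 0.5, 0.75])."""
    return frange(0, 1, 0.25)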
def spatial_information(ratemap):
"""Compute the spatial information and firing sparsity...
The specificity index examines the amount of information
(in bits) that a single spike conveys about the animal's
location (i.e., how well cell firing predicts the animal's
location).The spatial information content of cell discharge was
calculated using the formula:
    information content = \sum_i P_i (R_i/R) \log_2(R_i/R)
where i is the bin number, P_i, is the probability for occupancy
of bin i, R_i, is the mean firing rate for bin i, and R is the
overall mean firing rate.
In order to account for the effects of low firing rates (with
fewer spikes there is a tendency toward higher information
content) or random bursts of firing, the spike firing
time-series was randomly offset in time from the rat location
time-series, and the information content was calculated. A
distribution of the information content based on 100 such random
shifts was obtained and was used to compute a standardized score
(Zscore) of information content for that cell. While the
distribution is not composed of independent samples, it was
nominally normally distributed, and a Z value of 2.29 was chosen
as a cut-off for significance (the equivalent of a one-tailed
t-test with P = 0.01 under a normal distribution).
Reference(s)
------------
<NAME>., <NAME>., <NAME>., <NAME>.,
and <NAME>. (1994). "Spatial information content and
reliability of hippocampal CA1 neurons: effects of visual
input", Hippocampus, 4(4), 410-421.
Parameters
----------
ratemap : array of shape (n_units, n_bins)
Rate map in Hz.
Returns
-------
si : array of shape (n_units,)
spatial information (in bits) per unit
"""
ratemap = copy.deepcopy(ratemap)
# ensure that the ratemap always has nonzero firing rates,
# otherwise the spatial information might return NaNs:
bkg_rate = ratemap[ratemap>0].min()
ratemap[ratemap < bkg_rate] = bkg_rate
number_of_spatial_bins = np.prod(ratemap.shape[1:])
weight_per_bin = 1/number_of_spatial_bins
Pi = 1
if len(ratemap.shape) == 3:
# we have 2D tuning curve, (n_units, n_x, n_y)
R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate
Ri = np.transpose(ratemap, (2,1,0))
si = np.sum(np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1), axis=1)
elif len(ratemap.shape) == 2:
# we have 1D tuning curve, (n_units, n_x)
R = ratemap.mean(axis=1) # mean firing rate
Ri = ratemap.T
si = np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1)
else:
raise TypeError("rate map shape not supported / understood!")
return si/number_of_spatial_bins
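# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
# spatial information for a toy 1D ratemap with 2 units x 4 spatial bins. A flat
# tuning curve yields ~0 bits, a sharply peaked one yields a positive value.
def _example_spatial_information():
    toy_ratemap = np.array([[1.0, 1.0, 1.0, 1.0],    # flat tuning -> ~0 bits
                            [0.1, 0.1, 0.1, 10.0]])  # peaked tuning -> > 0 bits
    return spatial_information(toy_ratemap)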
def spatial_sparsity(ratemap):
"""Compute the firing sparsity...
The specificity index examines the amount of information
(in bits) that a single spike conveys about the animal's
location (i.e., how well cell firing predicts the animal's
location).The spatial information content of cell discharge was
calculated using the formula:
    information content = \sum_i P_i (R_i/R) \log_2(R_i/R)
where i is the bin number, P_i, is the probability for occupancy
of bin i, R_i, is the mean firing rate for bin i, and R is the
overall mean firing rate.
In order to account for the effects of low firing rates (with
fewer spikes there is a tendency toward higher information
content) or random bursts of firing, the spike firing
time-series was randomly offset in time from the rat location
time-series, and the information content was calculated. A
distribution of the information content based on 100 such random
shifts was obtained and was used to compute a standardized score
(Zscore) of information content for that cell. While the
distribution is not composed of independent samples, it was
nominally normally distributed, and a Z value of 2.29 was chosen
as a cut-off for significance (the equivalent of a one-tailed
t-test with P = 0.01 under a normal distribution).
Reference(s)
------------
<NAME>., <NAME>., <NAME>., <NAME>.,
and <NAME>. (1994). "Spatial information content and
reliability of hippocampal CA1 neurons: effects of visual
input", Hippocampus, 4(4), 410-421.
Parameters
----------
occupancy : array of shape (n_bins,)
Occupancy of the animal.
ratemap : array of shape (n_units, n_bins)
Rate map in Hz.
Returns
-------
    sparsity : array of shape (n_units,)
        sparsity (in percent) for each unit
"""
number_of_spatial_bins = np.prod(ratemap.shape[1:])
weight_per_bin = 1/number_of_spatial_bins
Pi = 1
if len(ratemap.shape) == 3:
# we have 2D tuning curve, (n_units, n_x, n_y)
R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate
Ri = ratemap
sparsity = np.sum(np.sum((Ri*Pi), axis=1), axis=1)/(R**2)
elif len(ratemap.shape) == 2:
# we have 1D tuning curve, (n_units, n_x)
R = ratemap.mean(axis=1) # mean firing rate
Ri = ratemap.T
sparsity = np.sum((Pi*Ri.T), axis=1)/(R**2)
else:
raise TypeError("rate map shape not supported / understood!")
return sparsity/number_of_spatial_bins
def _bst_get_bins_inside_interval(interval, ds, w=1):
"""(np.array) Return bin edges entirely contained inside an interval.
Bin edges always start at interval.start, and continue for as many
bins as would fit entirely inside the interval.
NOTE 1: there are (n+1) bin edges associated with n bins.
WARNING: if an interval is smaller than ds, then no bin will be
associated with the particular interval.
NOTE 2: nelpy uses half-open intervals [a,b), but if the bin
width divides b-a, then the bins will cover the entire
range. For example, if interval = [0,2) and ds = 1, then
bins = [0,1,2], even though [0,2] is not contained in
[0,2). There might be numerical precision deviations from this?
Parameters
----------
interval : EpochArray
EpochArray containing a single interval with a start, and stop
ds : float
Time bin width, in seconds.
w : number of bins to use in a sliding window mode. Default is 1 (no sliding window).
For example, 40 ms bins, with a stride of 5 ms, can be achieved by using (ds=0.005, w=8)
For now, w has to be an integer, and therefore 5 second bins, with a stride of 2 seconds
are not supported within this framework.
Returns
-------
bins : array
Bin edges in an array of shape (n+1,) where n is the number
of bins
centers : array
Bin centers in an array of shape (n,) where n is the number
of bins
"""
if interval.length < ds:
return None, None
n_bins = int(np.floor(interval.length / ds)) # number of bins
# linspace is better than arange for non-integral steps
bins = np.linspace(interval.start, interval.start + n_bins*ds, n_bins+1)
if w > 1:
wn_bins = np.max((1, n_bins - w + 1))
wn_bins = bins[:wn_bins+1] + w/2*ds - ds/2
bins = wn_bins
centers = bins[:-1] + (ds / 2)
return bins, centers
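# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
# demonstrates NOTE 2 above for the half-open interval [0, 2) with ds=1, using a
# minimal stand-in object instead of a full nelpy interval (only .start and
# .length are required by the function).
def _example_bins_inside_interval():
    from collections import namedtuple
    FakeInterval = namedtuple('FakeInterval', ['start', 'length'])
    bins, centers = _bst_get_bins_inside_interval(FakeInterval(start=0.0, length=2.0), ds=1.0)
    return bins, centers  # (array([0., 1., 2.]), array([0.5, 1.5]))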
def _bst_get_bins(intervalArray, ds, w=1):
"""
Docstring goes here. TBD. For use with bins that are contained
wholly inside the intervals.
"""
b = [] # bin list
c = [] # centers list
left_edges = []
right_edges = []
counter = 0
for interval in intervalArray:
bins, centers = _bst_get_bins_inside_interval(interval=interval, ds=ds, w=w)
if bins is not None:
left_edges.append(counter)
counter += len(centers) - 1
right_edges.append(counter)
counter += 1
b.extend(bins.tolist())
c.extend(centers.tolist())
bins = np.array(b)
bin_centers = np.array(c)
le = np.array(left_edges)
le = le[:, np.newaxis]
re = np.array(right_edges)
re = re[:, np.newaxis]
binned_support = np.hstack((le, re))
lengths = np.atleast_1d((binned_support[:,1] - binned_support[:,0] + 1).squeeze())
support_starts = bins[np.insert(np.cumsum(lengths+1),0,0)[:-1]]
support_stops = bins[np.insert(np.cumsum(lengths+1)-1,0,0)[1:]]
supportdata = np.vstack([support_starts, support_stops]).T
support = type(intervalArray)(supportdata) # set support to TRUE bin support
return bins, bin_centers, binned_support, support
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def get_mua(st, ds=None, sigma=None, truncate=None, _fast=True):
"""Compute the multiunit activity (MUA) from a spike train.
Parameters
----------
st : SpikeTrainArray
SpikeTrainArray containing one or more units.
-- OR --
st : BinnedSpikeTrainArray
BinnedSpikeTrainArray containing multiunit activity.
ds : float, optional
Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
Standard deviation (in seconds) of Gaussian smoothing kernel.
Default is 10 ms. If sigma==0 then no smoothing is applied.
truncate : float, optional
Bandwidth of the Gaussian filter. Default is 6.
Returns
-------
mua : AnalogSignalArray
AnalogSignalArray with MUA.
"""
if ds is None:
ds = 0.001 # 1 ms bin size
if sigma is None:
sigma = 0.01 # 10 ms standard deviation
if truncate is None:
truncate = 6
if isinstance(st, core.EventArray):
# bin spikes, so that we can count the spikes
mua_binned = st.bin(ds=ds).flatten()
elif isinstance(st, core.BinnedEventArray):
mua_binned = st.flatten()
ds = mua_binned.ds
else:
raise TypeError('st has to be one of (SpikeTrainArray, BinnedSpikeTrainArray)')
# make sure data type is float, so that smoothing works, and convert to rate
mua_binned._data = mua_binned._data.astype(float) / ds
# TODO: now that we can simply cast from BST to ASA and back, the following logic could be simplified:
# put mua rate inside an AnalogSignalArray
if _fast:
mua = core.AnalogSignalArray([], empty=True)
mua._data = mua_binned.data
mua._abscissa_vals = mua_binned.bin_centers
mua._abscissa.support = mua_binned.support
else:
mua = core.AnalogSignalArray(mua_binned.data, timestamps=mua_binned.bin_centers, fs=1/ds)
mua._fs = 1/ds
if (sigma != 0) and (truncate > 0):
mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)
return mua
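# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
# typical call pattern for get_mua given an existing SpikeTrainArray `st`; the
# 1 ms bin size and 20 ms smoothing kernel are example values only.
def _example_get_mua(st):
    return get_mua(st, ds=0.001, sigma=0.02)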
def is_odd(n):
"""Returns True if n is odd, and False if n is even.
Assumes integer.
"""
return bool(n & 1)
def swap_cols(arr, frm, to):
"""swap columns of a 2D np.array"""
if arr.ndim > 1:
arr[:,[frm, to]] = arr[:,[to, frm]]
else:
arr[frm], arr[to] = arr[to], arr[frm]
def swap_rows(arr, frm, to):
"""swap rows of a 2D np.array"""
if arr.ndim > 1:
arr[[frm, to],:] = arr[[to, frm],:]
else:
arr[frm], arr[to] = arr[to], arr[frm]
def pairwise(iterable):
"""returns a zip of all neighboring pairs.
This is used as a helper function for is_sorted.
Example
-------
>>> mylist = [2, 3, 6, 8, 7]
>>> list(pairwise(mylist))
[(2, 3), (3, 6), (6, 8), (8, 7)]
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def argsort(seq):
# http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
return sorted(range(len(seq)), key=seq.__getitem__)
def is_sorted_general(iterable, key=lambda a, b: a <= b):
"""Check to see if iterable is monotonic increasing (sorted)."""
return all(key(a, b) for a, b in pairwise(iterable))
def is_sorted(x, chunk_size=None):
"""Returns True if iterable is monotonic increasing (sorted).
NOTE: intended for 1D array, list or tuple. Will not work on
more than 1D
    This function works in-core with memory footprint XXX.
chunk_size = 100000 is probably a good choice.
"""
if not isinstance(x, (tuple, list, np.ndarray)):
raise TypeError("Unsupported type {}".format(type(x)))
x = np.atleast_1d(np.array(x).squeeze())
if x.ndim > 1:
raise ValueError("Input x must be 1-dimensional")
if chunk_size is None:
chunk_size = 500000
stop = x.size
for chunk_start in range(0, stop, chunk_size):
chunk_stop = int(min(stop, chunk_start + chunk_size + 1))
chunk = x[chunk_start:chunk_stop]
if not np.all(chunk[:-1] <= chunk[1:]):
return False
return True
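# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
def _example_is_sorted():
    return is_sorted([1, 2, 2, 5]), is_sorted([3, 1, 2])  # (True, False)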
def linear_merge(list1, list2):
"""Merge two SORTED lists in linear time.
UPDATED TO WORK WITH PYTHON 3.7+ (see https://stackoverflow.com/questions/51700960/runtimeerror-generator-raised-stopiteration-every-time-i-try-to-run-app)
Returns a generator of the merged result.
Examples
--------
>>> a = [1, 3, 5, 7]
>>> b = [2, 4, 6, 8]
>>> [i for i in linear_merge(a, b)]
[1, 2, 3, 4, 5, 6, 7, 8]
>>> [i for i in linear_merge(b, a)]
[1, 2, 3, 4, 5, 6, 7, 8]
>>> a = [1, 2, 2, 3]
>>> b = [2, 2, 4, 4]
>>> [i for i in linear_merge(a, b)]
[1, 2, 2, 2, 2, 3, 4, 4]
"""
# if any of the lists are empty, return the other (possibly also
# empty) list: (this is necessary because having either list1 or
# list2 be empty makes this quite a bit more complicated...)
if isinstance(list1, (list, np.ndarray)):
if len(list1) == 0:
list2 = iter(list2)
while True:
try:
yield next(list2)
except StopIteration:
return
if isinstance(list2, (list, np.ndarray)):
if len(list2) == 0:
list1 = iter(list1)
while True:
try:
yield next(list1)
except StopIteration:
return
list1 = iter(list1)
list2 = iter(list2)
value1 = next(list1)
value2 = next(list2)
# We'll normally exit this loop from a next() call raising
# StopIteration, which is how a generator function exits anyway.
while True:
if value1 <= value2:
# Yield the lower value.
try:
yield value1
except StopIteration:
return
try:
# Grab the next value from list1.
value1 = next(list1)
except StopIteration:
# list1 is empty. Yield the last value we received from list2, then
# yield the rest of list2.
try:
yield value2
except StopIteration:
return
while True:
try:
yield next(list2)
except StopIteration:
return
else:
try:
yield value2
except StopIteration:
return
try:
value2 = next(list2)
except StopIteration:
# list2 is empty.
try:
yield value1
except StopIteration:
return
while True:
try:
yield next(list1)
except StopIteration:
return
def get_mua_events(mua, fs=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None):
"""Determine MUA/PBEs from multiunit activity.
MUA : multiunit activity
PBE : population burst event
Parameters
----------
mua : AnalogSignalArray
AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
fs : float, optional
Sampling frequency of mua, in Hz. If not specified, it will be inferred from
mua.fs
minLength : float, optional
maxLength : float, optional
PrimaryThreshold : float, optional
SecondaryThreshold : float, optional
minThresholdLength : float, optional
Returns
-------
mua_epochs : EpochArray
EpochArray containing all the MUA events / PBEs.
Example
-------
mua = get_mua(spiketrain)
mua_epochs = get_mua_events(mua)
PBEs = get_PBEs(spiketrain, min_active=5)
= get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5)
"""
if fs is None:
fs = mua.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in mua!")
if PrimaryThreshold is None:
PrimaryThreshold = mua.mean() + 3*mua.std()
if SecondaryThreshold is None:
SecondaryThreshold = mua.mean()
if minLength is None:
minLength = 0.050 # 50 ms minimum event duration
if maxLength is None:
maxLength = 0.750 # 750 ms maximum event duration
if minThresholdLength is None:
minThresholdLength = 0.0
# determine MUA event bounds:
mua_bounds_idx, maxes, _ = get_events_boundaries(
x = mua.data,
PrimaryThreshold = PrimaryThreshold,
SecondaryThreshold = SecondaryThreshold,
minThresholdLength = minThresholdLength,
minLength = minLength,
maxLength = maxLength,
ds = 1/fs
)
if len(mua_bounds_idx) == 0:
logging.warning("no mua events detected")
return core.EpochArray(empty=True)
# store MUA bounds in an EpochArray
mua_epochs = core.EpochArray(mua.time[mua_bounds_idx])
return mua_epochs
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def get_PBEs(data, fs=None, ds=None, sigma=None, truncate=None, unsorted_id=0,
min_active=None, minLength=None, maxLength=None,
PrimaryThreshold=None, minThresholdLength=None,
SecondaryThreshold=None):
"""Determine PBEs from multiunit activity or spike trains.
Definitions
-----------
MUA : multiunit activity
PBE : population burst event
Summary
-------
This function can be used to identify PBE epochs from spike trains, binned
spike trains, or multiunit activity (in the form of an AnalogSignalArray).
It is recommended to either pass in a SpikeTrainArray or a
BinnedSpikeTrainArray, so that a `min_active` number of sorted units can be
set.
It is also recommended that the unsorted units (but not noise artifacts!)
should be included in the spike train that is used to estimate the PBEs. By
default, unit_id=0 is assumed to be unsorted, but this can be changed, or if
no unsorted units are present, you can set unsorted_id=None. Equivalently,
if min_active=0, then no restriction will apply, and the unsorted_id will
have no effect on the final PBE epochs.
Examples
--------
PBE_epochs = get_PBEs(mua_asa)
PBE_epochs = get_PBEs(spiketrain, min_active=5)
PBE_epochs = get_PBEs(binnedspiketrain, min_active=5)
Parameters
----------
data : AnalogSignalArray
AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz].
-- OR --
data : SpikeTrainArray
SpikeTrainArray with multiple units, including unsorted unit(s), but
excluding any noise artifects.
-- OR --
data : BinnedSpikeTrainArray
BinnedSpikeTrainArray containing multiunit activity.
fs : float, optional
Sampling frequency of mua, in Hz. If not specified, it will be inferred
from data.
ds : float, optional
Time step in which to bin spikes. Default is 1 ms.
sigma : float, optional
Standard deviation (in seconds) of Gaussian smoothing kernel.
Default is 10 ms. If sigma==0 then no smoothing is applied.
truncate : float, optional
Bandwidth of the Gaussian filter. Default is 6.
unsorted_id : int, optional
unit_id of the unsorted unit. Default is 0. If no unsorted unit is
present, then set unsorted_id = None
min_active : int, optional
Minimum number of active units per event, excluding unsorted unit.
Default is 5.
minLength : float, optional
Minimum event duration in seconds. Default is 50 ms.
maxLength : float, optional
Maximum event duration in seconds. Default is 750 ms.
PrimaryThreshold : float, optional
Primary threshold to exceed. Default is mean() + 3*std()
SecondaryThreshold : float, optional
Secondary threshold to fall back to. Default is mean().
minThresholdLength : float, optional
Minimum duration to stay above PrimaryThreshold. Default is 0 ms.
Returns
-------
PBE_epochs : EpochArray
EpochArray containing all the PBEs.
Future improvements
-------------------
As of now, it is possible, but not easy to specify the Primary and Secondary
thresholds for event detection. A slight change in API might be needed to
make this specification more flexible.
"""
if sigma is None:
sigma = 0.01 # 10 ms standard deviation
if truncate is None:
truncate = 6
if isinstance(data, core.AnalogSignalArray):
# if we have only mua, then we cannot set (ds, unsorted_id, min_active)
if ds is not None:
raise ValueError('if data is an AnalogSignalArray then ds cannot be specified!')
if unsorted_id:
raise ValueError('if data is an AnalogSignalArray then unsorted_id cannot be specified!')
if min_active is not None:
raise ValueError('if data is an AnalogSignalArray then min_active cannot be specified!')
mua = data
mua._data = mua._data.astype(float)
if (sigma != 0) and (truncate > 0):
mua = gaussian_filter(mua, sigma=sigma, truncate=truncate)
elif isinstance(data, (core.EventArray, core.BinnedEventArray)):
# set default parameter values:
if ds is None:
ds = 0.001 # default 1 ms
if min_active is None:
min_active = 5
mua = get_mua(data, ds=ds, sigma=sigma, truncate=truncate, _fast=True)
else:
raise TypeError('data has to be one of (AnalogSignalArray, SpikeTrainArray, BinnedSpikeTrainArray)')
# set default parameter values:
if fs is None:
fs = mua.fs
if minLength is None:
minLength = 0.050 # 50 ms minimum event duration
if maxLength is None:
maxLength = 0.750 # 750 ms maximum event duration
if minThresholdLength is None:
minThresholdLength = 0.0
# if PrimaryThreshold is None:
# PrimaryThreshold =
# if SecondaryThreshold is None:
# SecondaryThreshold =
PBE_epochs = get_mua_events(mua=mua,
fs=fs,
minLength=minLength,
maxLength=maxLength,
PrimaryThreshold=PrimaryThreshold,
minThresholdLength=minThresholdLength,
SecondaryThreshold=SecondaryThreshold)
# now require min_active number of sorted cells
if isinstance(data, (core.EventArray, core.BinnedEventArray)):
if min_active > 0:
if unsorted_id is not None:
# remove unsorted unit, if present:
unit_ids = copy.deepcopy(data.unit_ids)
try:
unit_ids.remove(unsorted_id)
except ValueError:
pass
# data_ = data._unit_subset(unit_ids)
data_ = data.loc[:,unit_ids]
else:
data_ = data
# determine number of active units per epoch:
n_active = np.array([snippet.n_active for snippet in data_[PBE_epochs]])
active_epochs_idx = np.argwhere(n_active > min_active).squeeze()
# only keep those epochs where sufficiently many units are active:
PBE_epochs = PBE_epochs[active_epochs_idx]
return PBE_epochs
def get_contiguous_segments(data, *, step=None, assume_sorted=None,
in_core=True, index=False, inclusive=False,
fs=None, sort=None, in_memory=None):
"""Compute contiguous segments (seperated by step) in a list.
Note! This function requires that a sorted list is passed.
It first checks if the list is sorted O(n), and only sorts O(n log(n))
if necessary. But if you know that the list is already sorted,
you can pass assume_sorted=True, in which case it will skip
the O(n) check.
Returns an array of size (n_segments, 2), with each row
being of the form ([start, stop]) [inclusive, exclusive].
NOTE: when possible, use assume_sorted=True, and step=1 as explicit
arguments to function call.
WARNING! Step is robustly computed in-core (i.e., when in_core is
True), but is assumed to be 1 when out-of-core.
Example
-------
>>> data = [1,2,3,4,10,11,12]
>>> get_contiguous_segments(data)
([1,5], [10,13])
>>> get_contiguous_segments(data, index=True)
([0,4], [4,7])
Parameters
----------
data : array-like
1D array of sequential data, typically assumed to be integral (sample
numbers).
step : float, optional
Expected step size for neighboring samples. Default uses numpy to find
the median, but it is much faster and memory efficient to explicitly
pass in step=1.
assume_sorted : bool, optional
If assume_sorted == True, then data is not inspected or re-ordered. This
can be significantly faster, especially for out-of-core computation, but
it should only be used when you are confident that the data is indeed
sorted, otherwise the results from get_contiguous_segments will not be
reliable.
in_core : bool, optional
If True, then we use np.diff which requires all the data to fit
into memory simultaneously, otherwise we use groupby, which uses
a generator to process potentially much larger chunks of data,
but also much slower.
index : bool, optional
If True, the indices of segment boundaries will be returned. Otherwise,
the segment boundaries will be returned in terms of the data itself.
Default is False.
inclusive : bool, optional
If True, the boundaries are returned as [(inclusive idx, inclusive idx)]
Default is False, and can only be used when index==True.
Deprecated
----------
in_memory : bool, optional
This is equivalent to the new 'in-core'.
sort : bool, optional
This is equivalent to the new 'assume_sorted'
fs : sampling rate (Hz) used to extend half-open interval support by 1/fs
"""
# handle deprecated API calls:
if in_memory:
in_core = in_memory
logging.warning("'in_memory' has been deprecated; use 'in_core' instead")
if sort:
assume_sorted = sort
logging.warning("'sort' has been deprecated; use 'assume_sorted' instead")
if fs:
step = 1/fs
logging.warning("'fs' has been deprecated; use 'step' instead")
if inclusive:
assert index, "option 'inclusive' can only be used with 'index=True'"
if in_core:
data = np.asarray(data)
if not assume_sorted:
if not is_sorted(data):
data = np.sort(data) # algorithm assumes sorted list
if step is None:
step = np.median(np.diff(data))
# assuming that data(t1) is sampled somewhere on [t, t+1/fs) we have a 'continuous' signal as long as
# data(t2 = t1+1/fs) is sampled somewhere on [t+1/fs, t+2/fs). In the most extreme case, it could happen
# that t1 = t and t2 = t + 2/fs, i.e. a difference of 2 steps.
if np.any(np.diff(data) < step):
logging.warning("some steps in the data are smaller than the requested step size.")
breaks = np.argwhere(np.diff(data)>=2*step)
starts = np.insert(breaks+1, 0, 0)
stops = np.append(breaks, len(data)-1)
bdries = np.vstack((data[starts], data[stops] + step)).T
if index:
if inclusive:
indices = np.vstack((starts, stops)).T
else:
indices = np.vstack((starts, stops + 1)).T
return indices
else:
from itertools import groupby
from operator import itemgetter
if not assume_sorted:
if not is_sorted(data):
# data = np.sort(data) # algorithm assumes sorted list
raise NotImplementedError("out-of-core sorting has not been implemented yet...")
if step is None:
step = 1
bdries = []
if not index:
for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
f = itemgetter(1)
gen = (f(x) for x in g)
start = next(gen)
stop = start
for stop in gen:
pass
bdries.append([start, stop + step])
else:
counter = 0
for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])):
f = itemgetter(1)
gen = (f(x) for x in g)
_ = next(gen)
start = counter
stop = start
for _ in gen:
stop +=1
if inclusive:
bdries.append([start, stop])
else:
bdries.append([start, stop + 1])
counter = stop + 1
return np.asarray(bdries)
def get_direction(asa, *, sigma=None):
"""Return epochs during which an animal was running left to right, or right
to left.
Parameters
----------
asa : AnalogSignalArray 1D
AnalogSignalArray containing the 1D position data.
sigma : float, optional
Smoothing to apply to position (x) before computing gradient estimate.
Default is 0.
Returns
-------
l2r, r2l : EpochArrays
EpochArrays corresponding to left-to-right and right-to-left movement.
"""
if sigma is None:
sigma = 0
if not isinstance(asa, core.AnalogSignalArray):
raise TypeError('AnalogSignalArray expected!')
assert asa.n_signals == 1, "1D AnalogSignalArray expected!"
direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma),
rectify=False).data
direction[direction>=0] = 1
direction[direction<0] = -1
direction = direction.squeeze()
l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1)
l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
l2r = core.EpochArray(asa.abscissa_vals[l2r])
r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1)
r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive]
r2l = core.EpochArray(asa.abscissa_vals[r2l])
return l2r, r2l
class PrettyBytes(int):
"""Prints number of bytes in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
if self.val < 1024:
return '{} bytes'.format(self.val)
elif self.val < 1024**2:
return '{:.3f} kilobytes'.format(self.val/1024)
elif self.val < 1024**3:
return '{:.3f} megabytes'.format(self.val/1024**2)
elif self.val < 1024**4:
return '{:.3f} gigabytes'.format(self.val/1024**3)
def __repr__(self):
return self.__str__()
class PrettyInt(int):
"""Prints integers in a more readable format"""
def __init__(self, val):
self.val = val
def __str__(self):
return '{:,}'.format(self.val)
def __repr__(self):
return '{:,}'.format(self.val)
class PrettyDuration(float):
"""Time duration with pretty print.
Behaves like a float, and can always be cast to a float.
"""
def __init__(self, seconds):
self.duration = seconds
def __str__(self):
return self.time_string(self.duration)
def __repr__(self):
return self.time_string(self.duration)
@staticmethod
def to_dhms(seconds):
"""convert seconds into hh:mm:ss:ms"""
pos = seconds >= 0
if not pos:
seconds = -seconds
ms = seconds % 1; ms = round(ms*10000)/10
seconds = floor(seconds)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
Time = namedtuple('Time', 'pos dd hh mm ss ms')
time = Time(pos=pos, dd=d, hh=h, mm=m, ss=s, ms=ms)
return time
@staticmethod
def time_string(seconds):
"""returns a formatted time string."""
if np.isinf(seconds):
return 'inf'
pos, dd, hh, mm, ss, s = PrettyDuration.to_dhms(seconds)
if s > 0:
if mm == 0:
# in this case, represent milliseconds in terms of
# seconds (i.e. a decimal)
sstr = str(s/1000).lstrip('0')
if s >= 999.5:
ss += 1
s = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh +=1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
# for all other cases, milliseconds will be represented
# as an integer
if s >= 999.5:
ss += 1
s = 0
sstr = ""
# now propagate the carry:
if ss == 60:
mm += 1
ss = 0
if mm == 60:
hh +=1
mm = 0
if hh == 24:
dd += 1
hh = 0
else:
sstr = ":{:03d}".format(int(s))
else:
sstr = ""
if dd > 0:
daystr = "{:01d} days ".format(dd)
else:
daystr = ""
if hh > 0:
timestr = daystr + "{:01d}:{:02d}:{:02d}{} hours".format(hh, mm, ss, sstr)
elif mm > 0:
timestr = daystr + "{:01d}:{:02d}{} minutes".format(mm, ss, sstr)
elif ss > 0:
timestr = daystr + "{:01d}{} seconds".format(ss, sstr)
else:
timestr = daystr +"{} milliseconds".format(s)
if not pos:
timestr = "-" + timestr
return timestr
def __add__(self, other):
"""a + b"""
return PrettyDuration(self.duration + other)
def __radd__(self, other):
"""b + a"""
return self.__add__(other)
def __sub__(self, other):
"""a - b"""
return PrettyDuration(self.duration - other)
def __rsub__(self, other):
"""b - a"""
return other - self.duration
def __mul__(self, other):
"""a * b"""
return PrettyDuration(self.duration * other)
def __rmul__(self, other):
"""b * a"""
return self.__mul__(other)
def __truediv__(self, other):
"""a / b"""
return PrettyDuration(self.duration / other)
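# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
# PrettyDuration behaves like a float but prints a human-readable duration.
def _example_pretty_duration():
    return str(PrettyDuration(90)), str(PrettyDuration(0.5))  # e.g. ('1:30 minutes', '500.0 milliseconds')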
def shrinkMatColsTo(mat, numCols):
""" Docstring goes here
Shrinks a NxM1 matrix down to an NxM2 matrix, where M2 <= M1"""
import scipy.ndimage
numCells = mat.shape[0]
numColsMat = mat.shape[1]
a = np.zeros((numCells, numCols))
for row in np.arange(numCells):
niurou = scipy.ndimage.interpolation.zoom(input=mat[row,:], zoom=(numCols/numColsMat), order = 1)
a[row,:] = niurou
return a
def find_threshold_crossing_events(x, threshold, *, mode='above'):
"""Find threshold crossing events. INCLUSIVE
Parameters
----------
x : numpy array
Input data
threshold : float
The value whose crossing triggers an event
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
Returns
-------
eventlist : list
List containing the indices corresponding to threshold crossings
eventmax : list
List containing the maximum value of each event
"""
from itertools import groupby
from operator import itemgetter
if mode == 'below':
cross_threshold = np.where(x <= threshold, 1, 0)
elif mode == 'above':
cross_threshold = np.where(x >= threshold, 1, 0)
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
eventlist = []
eventmax = []
for k,v in groupby(enumerate(cross_threshold),key=itemgetter(1)):
if k:
v = list(v)
eventlist.append([v[0][0],v[-1][0]])
try :
eventmax.append(x[v[0][0]:(v[-1][0]+1)].max())
except :
print(v, x[v[0][0]:v[-1][0]])
eventmax = np.asarray(eventmax)
eventlist = np.asarray(eventlist)
return eventlist, eventmax
def get_events_boundaries(x, *, PrimaryThreshold=None,
SecondaryThreshold=None,
minThresholdLength=None, minLength=None,
maxLength=None, ds=None, mode='above'):
"""get event boundaries such that event.max >= PrimaryThreshold
and the event extent is defined by SecondaryThreshold.
Note that when PrimaryThreshold==SecondaryThreshold, then this is a
simple threshold crossing algorithm.
NB. minLength and maxLength are applied to the SecondaryThreshold
events, whereas minThresholdLength is applied to the
PrimaryThreshold events.
Parameters
----------
x : numpy array
Input data
mode : string, optional in ['above', 'below']; default 'above'
event triggering above, or below threshold
PrimaryThreshold : float, optional
If mode=='above', requires that event.max >= PrimaryThreshold
If mode=='below', requires that event.min <= PrimaryThreshold
SecondaryThreshold : float, optional
The value that defines the event extent
minThresholdLength : float, optional
Minimum duration for which the PrimaryThreshold is crossed
minLength : float, optional
Minimum duration for which the SecondaryThreshold is crossed
maxLength : float, optional
Maximum duration for which the SecondaryThreshold is crossed
ds : float, optional
Time step of the input data x
Returns
-------
returns bounds, maxes, events
where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive
maxes <==> maximum value during each event
events <==> PrimaryThreshold to PrimaryThreshold, inclusive
"""
# TODO: x must be a numpy array
# TODO: ds is often used, but we have no default, and no check for when
# it is left as None.
# TODO: the Docstring should equally be improved.
x = x.squeeze()
if x.ndim > 1:
raise TypeError("multidimensional arrays not supported!")
if PrimaryThreshold is None: # by default, threshold is 3 SDs above mean of x
PrimaryThreshold = np.mean(x) + 3*np.std(x)
if SecondaryThreshold is None: # by default, revert back to mean of x
SecondaryThreshold = np.mean(x) # + 0*np.std(x)
events, _ = \
find_threshold_crossing_events(x=x,
threshold=PrimaryThreshold,
mode=mode)
# apply minThresholdLength criterion:
if minThresholdLength is not None and len(events) > 0:
durations = (events[:,1] - events[:,0] + 1) * ds
events = events[[durations >= minThresholdLength]]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Find periods where value is > SecondaryThreshold; note that the previous periods should be within these!
if mode == 'above':
assert SecondaryThreshold <= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
elif mode == 'below':
assert SecondaryThreshold >= PrimaryThreshold, \
"Secondary Threshold by definition should include more data than Primary Threshold"
else:
raise NotImplementedError(
"mode {} not understood for find_threshold_crossing_events".format(str(mode)))
bounds, broader_maxes = \
find_threshold_crossing_events(x=x,
threshold=SecondaryThreshold,
mode=mode)
# Find corresponding big windows for potential events
# Specifically, look for closest left edge that is just smaller
outer_boundary_indices = np.searchsorted(bounds[:,0], events[:,0], side='right')
# searchsorted finds the index after, so subtract one to get index before
outer_boundary_indices = outer_boundary_indices - 1
# Find extended boundaries for events by pairing to larger windows
# (Note that there may be repeats if the larger window contains multiple > 3SD sections)
bounds = bounds[outer_boundary_indices,:]
maxes = broader_maxes[outer_boundary_indices]
if minLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor [durations <= maxLength] but be careful about edge cases
bounds = bounds[[durations >= minLength]]
maxes = maxes[[durations >= minLength]]
events = events[[durations >= minLength]]
if maxLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor [durations <= maxLength] but be careful about edge cases
bounds = bounds[[durations <= maxLength]]
maxes = maxes[[durations <= maxLength]]
events = events[[durations <= maxLength]]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Now, since all that we care about are the larger windows, so we should get rid of repeats
_, unique_idx = np.unique(bounds[:,0], return_index=True)
bounds = bounds[unique_idx,:] # SecondaryThreshold to SecondaryThreshold
maxes = maxes[unique_idx] # maximum value during event
events = events[unique_idx,:] # PrimaryThreshold to PrimaryThreshold
return bounds, maxes, events
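# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
# detect a single square 'event' in a toy signal using an explicit primary /
# secondary threshold pair; ds is the sample spacing (here 1 ms).
def _example_get_events_boundaries():
    x = np.zeros(1000)
    x[200:300] = 5.0
    return get_events_boundaries(x, PrimaryThreshold=4.0,
                                 SecondaryThreshold=1.0, ds=0.001)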
def signal_envelope1D(data, *, sigma=None, fs=None):
    logging.warning("'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!")
return signal_envelope_1d(data, sigma=sigma, fs=fs)
def signal_envelope_1d(data, *, sigma=None, fs=None):
"""Finds the signal envelope by taking the absolute value
of the Hilbert transform
Parameters
----------
data : numpy array, list, or RegularlySampledAnalogSignalArray
Input data
If data is a numpy array, it is expected to have shape
(n_signals, n_samples)
If data is a list, it is expected to have length n_signals,
where each sublist has length n_samples, i.e. data is not
jagged
sigma : float, optional
Standard deviation of the Gaussian kernel used to
smooth the envelope after applying the Hilbert transform.
Units of seconds. Default is 4 ms
fs : float, optional
Sampling rate of the signal
Returns
-------
out : same type as the input object
An object containing the signal envelope
TODO: this is not yet epoch-aware!
UPDATE: this is actually epoch-aware by now!
"""
if sigma is None:
sigma = 0.004 # 4 ms standard deviation
if fs is None:
if isinstance(data, (np.ndarray, list)):
raise ValueError("sampling frequency must be specified!")
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
fs = data.fs
if isinstance(data, (np.ndarray, list)):
data_array = np.array(data)
n_dims = np.array(data).ndim
assert n_dims <= 2, "Only 1D signals supported!"
if n_dims == 1:
input_data = data_array.reshape((1, data_array.size))
else:
input_data = data_array
n_signals, n_samples = input_data.shape
# Compute number of samples to compute fast FFTs
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (input_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
if isinstance(data, list):
envelope = envelope.tolist()
return envelope
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
# Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported
assert data.data.ndim == 2
cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0)
newasa = data.copy()
# for segment in data:
for idx in range(data.n_epochs):
# print('hilberting epoch {}/{}'.format(idx+1, data.n_epochs))
segment_data = data._data[:,cum_lengths[idx]:cum_lengths[idx+1]]
n_signals, n_samples = segment_data.shape
# Compute number of samples to compute fast FFTs:
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (segment_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
newasa._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.atleast_2d(envelope)
return newasa
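# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
# envelope of a 50 Hz tone with 2 Hz amplitude modulation, sampled at `fs` Hz;
# the returned array has shape (1, n_samples) for a 1D ndarray input.
def _example_signal_envelope(fs=1000.0):
    t = np.arange(0, 1.0, 1.0 / fs)
    x = (1.0 + 0.5 * np.sin(2 * np.pi * 2 * t)) * np.sin(2 * np.pi * 50 * t)
    return signal_envelope_1d(x, sigma=0.004, fs=fs)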
def nextpower(n, base=2.0):
"""Return the next integral power of two greater than the given number.
Specifically, return m such that
m >= n
m == 2**x
where x is an integer. Use base argument to specify a base other than 2.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (<NAME>)
"""
x = base**ceil (log (n) / log (base))
if type(n) == np.ndarray:
return np.asarray (x, dtype=int)
else:
return int (x)
def nextfastpower(n):
"""Return the next integral power of small factors greater than the given
number. Specifically, return m such that
m >= n
m == 2**x * 3**y * 5**z
where x, y, and z are integers.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (<NAME>)
See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html
"""
if n < 7:
return max (n, 1)
# x, y, and z are all bounded from above by the formula of nextpower.
# Compute all possible combinations for powers of 3 and 5.
# (Not too many for reasonable FFT sizes.)
    def power_series (x, base):
        nmax = int(ceil(log(x) / log(base)))  # cast to int so np.logspace accepts num on modern numpy
        return np.logspace(0.0, nmax, num=nmax + 1, base=base)
n35 = np.outer (power_series (n, 3.0), power_series (n, 5.0))
n35 = n35[n35<=n]
# Lump the powers of 3 and 5 together and solve for the powers of 2.
n2 = nextpower (n / n35)
return int (min (n2 * n35))
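# Illustrative sketch (hypothetical helper, not part of the original nelpy API):
# nextpower pads up to a power of two, nextfastpower to the next 2**x * 3**y * 5**z.
def _example_fast_fft_sizes():
    return nextpower(1000), nextfastpower(1000)  # expected (1024, 1000)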
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def gaussian_filter(obj, *, fs=None, sigma=None, truncate=None, inplace=False, mode=None, cval=None, within_intervals=False):
"""Smooths with a Gaussian kernel.
Smoothing is applied along the abscissa, and the same smoothing is applied to each
signal in the RegularlySampledAnalogSignalArray, or to each unit in a BinnedSpikeTrainArray.
Smoothing is applied ACROSS intervals, but smoothing WITHIN intervals is also supported.
Parameters
----------
obj : RegularlySampledAnalogSignalArray or BinnedSpikeTrainArray.
fs : float, optional
Sampling rate (in obj.base_unit^-1) of obj. If not provided, it will
be inferred.
sigma : float, optional
Standard deviation of Gaussian kernel, in obj.base_units. Default is 0.05
(50 ms if base_unit=seconds).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0.
inplace : bool
If True the data will be replaced with the smoothed data.
Default is False.
mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional
The mode parameter determines how the array borders are handled,
where cval is the value when mode is equal to ‘constant’. Default is
‘reflect’.
cval : scalar, optional
Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
within_intervals : boolean, optional
If True, then smooth within each epoch. Otherwise smooth across epochs.
Default is False.
        Note that when mode = 'wrap', smoothing within epochs isn't affected
        by wrapping.
Returns
-------
out : same type as obj
An object with smoothed data is returned.
"""
if sigma is None:
sigma = 0.05
if truncate is None:
truncate = 4
if mode is None:
mode = 'reflect'
if cval is None:
cval = 0.0
if not inplace:
out = copy.deepcopy(obj)
else:
out = obj
if isinstance(out, core.RegularlySampledAnalogSignalArray):
if fs is None:
fs = out.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
elif isinstance(out, core.BinnedEventArray):
bst = out
if fs is None:
fs = 1/bst.ds
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
else:
raise NotImplementedError("gaussian_filter for {} is not yet supported!".format(str(type(out))))
sigma = sigma * fs
if not within_intervals:
# see https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python
# (1) if smoothing across intervals, we work on a merged support
# (2) build abscissa_vals, including existing ones, and out-of-support ones
# (3) to smooth U, build auxiliary arrays V and W, with (V=U).nan=0, and (W=1).nan=0
# (4) Z = smooth(V)/smooth(W)
# (5) only keep original support, and original abscissa_vals
if isinstance(out, (core.RegularlySampledAnalogSignalArray, core.BinnedEventArray)):
support = out._abscissa.support.merge()
if not support.domain.is_finite:
support.domain = (support.start, support.stop) #TODO: #FIXME might come from abscissa definition, and not from support
missing_abscissa_vals = []
for interval in (~support):
missing_vals = frange(interval.start, interval.stop, 1/fs)
missing_abscissa_vals.extend(missing_vals)
if isinstance(out, core.RegularlySampledAnalogSignalArray):
n_signals = out.n_signals
n_samples = out.n_samples
elif isinstance(out, core.BinnedEventArray):
n_signals = out.n_series
n_samples = out.n_bins
V = np.zeros((n_signals, n_samples + len(missing_abscissa_vals)))
W = np.ones(V.shape)
all_abscissa_vals = np.sort(np.append(out._abscissa_vals, missing_abscissa_vals))
data_idx = np.searchsorted(all_abscissa_vals, out._abscissa_vals)
missing_idx = np.searchsorted(all_abscissa_vals, missing_abscissa_vals)
V[:, data_idx] = out.data
W[:, missing_idx] = 0
VV = scipy.ndimage.filters.gaussian_filter(V, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval)
WW = scipy.ndimage.filters.gaussian_filter(W, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval)
Z = VV[:,data_idx]/WW[:,data_idx]
out._data = Z
else:
raise NotImplementedError("gaussian_filter across intervals for {} is not yet supported!".format(str(type(out))))
else: # within intervals:
cum_lengths = np.insert(np.cumsum(out.lengths), 0, 0)
out._data = out._data.astype(float)
if isinstance(out, core.RegularlySampledAnalogSignalArray):
# now smooth each interval separately
for idx in range(out.n_intervals):
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = scipy.ndimage.filters.gaussian_filter(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], sigma=(0,sigma), truncate=truncate)
elif isinstance(out, core.BinnedSpikeTrainArray):
# now smooth each interval separately
for idx in range(out.n_epochs):
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = scipy.ndimage.filters.gaussian_filter(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], sigma=(0,sigma), truncate=truncate)
# out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = self._smooth_array(out._data[:,cum_lengths[idx]:cum_lengths[idx+1]], w=w)
return out
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def ddt_asa(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None, norm=False):
"""Numerical differentiation of a regularly sampled AnalogSignalArray.
Optionally also smooths result with a Gaussian kernel.
Smoothing is applied in time, and the same smoothing is applied to each
signal in the AnalogSignalArray.
Differentiation, (and if requested, smoothing) is applied within each epoch.
Parameters
----------
asa : nelpy.RegularlySampledAnalogSignalArray
Input object.
fs : float, optional
Sampling rate (in Hz) of input RSASA. If not provided, it will be obtained
from asa.fs.
smooth : bool, optional
If true, result will be smoothed. Default is False
rectify : bool, optional
If True, absolute value of derivative is computed. Default is True.
sigma : float, optional
Standard deviation of Gaussian kernel, in seconds. Default is 0.05
(50 ms).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0
norm: boolean, optional
If True, then apply the L2 norm to the result.
Returns
-------
out : nelpy.RegularlySampledAnalogSignalArray
A RegularlySampledAnalogSignalArray with derivative data (in units
per second) is returned.
Notes
-----
Central differences are used here.
"""
if not isinstance(asa, core.RegularlySampledAnalogSignalArray):
raise TypeError("Input object must be a RegularlySampledAnalogSignalArray!")
if fs is None:
fs = asa.fs
if sigma is None:
sigma = 0.05 # 50 ms default
out = asa.copy()
cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)
# ensure that datatype is float
# TODO: this will break complex data
out._data = out.data.astype(float)
# now obtain the derivative for each epoch separately
for idx in range(asa.n_epochs):
# if 1D:
if asa.n_signals == 1:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[[0],cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
else:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[:,cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
out._data = out._data * fs
if norm:
out._data = np.atleast_2d(np.linalg.norm(out._data, axis=0))
if rectify:
out._data = np.abs(out._data)
if smooth:
out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)
return out
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def dxdt_AnalogSignalArray(asa, *, fs=None, smooth=False, rectify=True, sigma=None, truncate=None):
"""Numerical differentiation of a regularly sampled AnalogSignalArray.
Optionally also smooths result with a Gaussian kernel.
Smoothing is applied in time, and the same smoothing is applied to each
signal in the AnalogSignalArray.
Differentiation, (and if requested, smoothing) is applied within each epoch.
Parameters
----------
asa : AnalogSignalArray
fs : float, optional
Sampling rate (in Hz) of AnalogSignalArray. If not provided, it will
be obtained from asa.fs
smooth : bool, optional
If true, result will be smoothed. Default is False
rectify : bool, optional
If True, absolute value of derivative is computed. Default is True.
sigma : float, optional
Standard deviation of Gaussian kernel, in seconds. Default is 0.05
(50 ms).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0
Returns
-------
out : AnalogSignalArray
An AnalogSignalArray with derivative data (in units per second) is returned.
"""
raise DeprecationWarning('use ddt_asa instead!')
if fs is None:
fs = asa.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the AnalogSignalArray!")
if sigma is None:
sigma = 0.05 # 50 ms default
out = copy.deepcopy(asa)
cum_lengths = np.insert(np.cumsum(asa.lengths), 0, 0)
# ensure that datatype is float
out._data = out.data.astype(float)
if asa.n_signals == 2:
out._data = out._data[[0],:]
# now obtain the derivative for each epoch separately
for idx in range(asa.n_epochs):
# if 1D:
if asa.n_signals == 1:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.gradient(asa._data[[0],cum_lengths[idx]:cum_lengths[idx+1]], axis=1)
elif asa.n_signals == 2:
if (cum_lengths[idx+1]-cum_lengths[idx]) < 2:
# only single sample
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = 0
else:
out._data[[0],cum_lengths[idx]:cum_lengths[idx+1]] = np.linalg.norm(np.gradient(asa._data[:,cum_lengths[idx]:cum_lengths[idx+1]], axis=1), axis=0)
else:
raise TypeError("more than 2D not currently supported!")
out._data = out._data * fs
if rectify:
out._data = np.abs(out._data)
if smooth:
out = gaussian_filter(out, fs=fs, sigma=sigma, truncate=truncate)
return out
def get_threshold_crossing_epochs(asa, t1=None, t2=None, mode='above'):
"""Return epochs where a signal crosses a compound threshold specified by t1
and t2.
Parameters
----------
asa : AnalogSignalArray
AnalogSignalArray containing a single channel
t1 : float, optional
Primary threshold. Minimum signal value that has to be reached /
exceeded during an event. Default is 3 standard deviations above signal
mean.
t2 : float, optional
Secondary threshold. Signal value that defines the event boundaries.
Default is signal mean.
mode : string, optional
Mode of operation. One of ['above', 'below']. If 'above', then return
epochs where the signal exceeds the compound threshold, and if 'below',
then return epochs where the signal falls below the compound threshold.
Default is 'above'.
Returns
-------
epochs : EpochArray
EpochArray with all the epochs where the signal satisfied the criteria.
"""
if asa.n_signals > 1:
raise TypeError("multidimensional AnalogSignalArrays not supported!")
x = asa.data.squeeze()
if t1 is None: # by default, threshold is 3 SDs above mean of x
t1 = np.mean(x) + 3*np.std(x)
if t2 is None: # by default, revert back to mean of x
t2 = np.mean(x)
# compute periods where signal exceeds compound threshold
epoch_bounds, _, _ = get_events_boundaries(
x=x,
PrimaryThreshold=t1,
SecondaryThreshold=t2,
mode=mode
)
# convert bounds to time in seconds
epoch_bounds = asa.time[epoch_bounds]
if len(epoch_bounds) == 0:
return type(asa._abscissa.support)(empty=True)
# add 1/fs to stops for open interval
epoch_bounds[:,1] += 1/asa.fs
    # create EpochArray with threshold-exceeding bounds
epochs = type(asa._abscissa.support)(epoch_bounds)
return epochs
def get_run_epochs(speed, v1=10, v2=8):
"""Return epochs where animal is running at least as fast as
specified by v1 and v2.
Parameters
----------
speed : AnalogSignalArray
AnalogSignalArray containing single channel speed, in units/sec
v1 : float, optional
Minimum speed (in same units as speed) that has to be reached /
exceeded during an event. Default is 10 [units/sec]
v2 : float, optional
Speed that defines the event boundaries. Default is 8 [units/sec]
Returns
-------
run_epochs : EpochArray
EpochArray with all the epochs where speed satisfied the criteria.
"""
run_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='above')
return run_epochs
def get_inactive_epochs(speed, v1=5, v2=7):
"""Return epochs where animal is running no faster than specified by
v1 and v2.
Parameters
----------
speed : AnalogSignalArray
AnalogSignalArray containing single channel speed, in units/sec
    v1 : float, optional
        Maximum speed (in same units as speed) that the signal must drop
        to / below during an event. Default is 5 [units/sec]
    v2 : float, optional
        Speed that defines the event boundaries. Default is 7 [units/sec]
Returns
-------
inactive_epochs : EpochArray
EpochArray with all the epochs where speed satisfied the criteria.
"""
inactive_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='below')
return inactive_epochs
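# Example (illustrative sketch, not from the original source; assumes `speed` is a
# single-channel AnalogSignalArray of running speed):
#
#     run_epochs = get_run_epochs(speed, v1=10, v2=8)
#     inactive_epochs = get_inactive_epochs(speed, v1=5, v2=7)
#     speed_while_running = speed[run_epochs]  # nelpy-style epoch indexing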
def spiketrain_union(st1, st2):
"""Join two spiketrains together.
WARNING! This function should be improved a lot!
"""
assert st1.n_units == st2.n_units
support = st1.support.join(st2.support)
newdata = []
for unit in range(st1.n_units):
newdata.append(np.append(st1.time[unit], st2.time[unit]))
fs = None
if st1.fs == st2.fs:
fs = st1.fs
return core.SpikeTrainArray(newdata, support=support, fs=fs)
########################################################################
# uncurated below this line!
########################################################################
def find_nearest_idx(array, val):
"""Finds nearest index in array to value.
Parameters
----------
array : np.array
val : float
Returns
-------
Index into array that is closest to val
TODO: this is a better version that should be incorporated:
# Based on answer here: http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
def find_nearest(array,values):
right_idxs = np.searchsorted(array, values, side="left")
left_idxs = np.where(right_idxs > 0, right_idxs-1, right_idxs)
right_idxs = np.where(right_idxs == len(array), len(array)-1, right_idxs)
closest_idx = np.where(np.abs(values - array[right_idxs]) < np.abs(values - array[left_idxs]),
right_idxs, left_idxs)
return closest_idx
"""
return (np.abs(array-val)).argmin()
def find_nearest_indices(array, vals):
"""Finds nearest index in array to value.
Parameters
----------
array : np.array
This is the array you wish to index into.
vals : np.array
This is the array that you are getting your indices from.
Returns
-------
Indices into array that are closest to vals.
Notes
-----
Wrapper around find_nearest_idx().
"""
return np.array([find_nearest_idx(array, val) for val in vals], dtype=int)
def get_sort_idx(tuning_curves):
"""Finds indices to sort neurons by max firing in tuning curve.
Parameters
----------
tuning_curves : list of lists
Where each inner list is the tuning curves for an individual
neuron.
Returns
-------
sorted_idx : list
List of integers that correspond to the neuron in sorted order.
"""
tc_max_loc = []
for i, neuron_tc in enumerate(tuning_curves):
tc_max_loc.append((i, np.where(neuron_tc == np.max(neuron_tc))[0][0]))
sorted_by_tc = sorted(tc_max_loc, key=lambda x: x[1])
sorted_idx = []
for idx in sorted_by_tc:
sorted_idx.append(idx[0])
return sorted_idx
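# Example (illustrative):
#
#     tuning_curves = [np.array([0, 1, 5, 2]),   # neuron 0 peaks at bin 2
#                      np.array([3, 0, 1, 0])]   # neuron 1 peaks at bin 0
#     get_sort_idx(tuning_curves)                # -> [1, 0]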
def collapse_time(obj, gap=0):
"""Collapse all epochs in a SpikeTrainArray and collapse them into a single, contiguous SpikeTrainArray"""
# TODO: redo SpikeTrainArray so as to keep the epochs separate, and to support gaps!
# We'll have to adjust all the spikes per epoch... and we'll have to compute a new support. Also set a flag!
# If it's a SpikeTrainArray, then we left-shift the spike times. If it's an AnalogSignalArray, then we
# left-shift the time and tdata.
# Also set a new attribute, with the boundaries in seconds.
if isinstance(obj, core.RegularlySampledAnalogSignalArray):
new_obj = type(obj)(empty=True)
new_obj._data = obj._data
durations = obj.support.durations
starts = np.insert(np.cumsum(durations + gap),0,0)[:-1]
stops = starts + durations
newsupport = type(obj._abscissa.support)(np.vstack((starts, stops)).T)
new_obj._support = newsupport
new_time = obj.time.astype(float) # fast copy
time_idx = np.insert(np.cumsum(obj.lengths),0,0)
new_offset = 0
for epidx in range(obj.n_epochs):
if epidx > 0:
new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset + gap
new_offset += durations[epidx] + gap
else:
new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset
new_offset += durations[epidx]
new_obj._time = new_time
new_obj._fs = obj._fs
elif isinstance(obj, core.EventArray):
if gap > 0:
raise ValueError("gaps not supported for SpikeTrainArrays yet!")
new_obj = type(obj)(empty=True)
new_time = [[] for _ in range(obj.n_series)]
duration = 0
for st_ in obj:
le = st_.support.start
for unit_ in range(obj.n_series):
new_time[unit_].extend(st_._data[unit_] - le + duration)
duration += st_.support.duration
new_time = np.asanyarray([np.asanyarray(unittime) for unittime in new_time])
new_obj._data = new_time
new_obj.support = type(obj._abscissa.support)([0, duration])
new_obj._series_ids = obj._series_ids
new_obj._series_labels = obj._series_labels
new_obj._series_tags = obj._series_tags
elif isinstance(obj, core.BinnedEventArray):
raise NotImplementedError("BinnedEventArrays are not yet supported, but bst.data is essentially already collapsed!")
else:
raise TypeError("unsupported type for collapse_time")
return new_obj
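# Example (sketch; assumes `st` is a SpikeTrainArray and `run_epochs` an EpochArray):
#
#     st_flat = collapse_time(st[run_epochs])
#     # st_flat.support is a single epoch [0, total duration of run_epochs]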
def cartesian(xcenters, ycenters):
"""Finds every combination of elements in two arrays.
Parameters
----------
xcenters : np.array
ycenters : np.array
Returns
-------
cartesian : np.array
With shape(n_sample, 2).
"""
return np.transpose([np.tile(xcenters, len(ycenters)), np.repeat(ycenters, len(xcenters))])
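# Example (illustrative):
#
#     cartesian(np.array([1, 2]), np.array([3, 4]))
#     # -> array([[1, 3], [2, 3], [1, 4], [2, 4]])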
| 2.34375 | 2 |
python/paddle/fluid/contrib/slim/quantization/imperative/qat.py | logan-siyao-peng/Paddle | 0 | 7152 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import numpy as np
import sys
import os
import paddle
from paddle.fluid import dygraph, core, framework
from paddle.fluid.executor import Executor
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn import Linear, Conv2D, Conv2DTranspose, MaxPool2D, MaxPool1D, BatchNorm1D, BatchNorm2D, BatchNorm3D
from paddle.fluid.dygraph.nn import BatchNorm, Pool2D
from paddle.fluid.io import load_inference_model, save_inference_model
from paddle.nn.layer.activation import ReLU, LeakyReLU, Sigmoid, ReLU6, Tanh, Softmax, PReLU, Swish
from paddle.fluid.log_helper import get_logger
from . import quant_nn
from .. import quantization_pass
__all__ = ['ImperativeQuantAware', 'ImperativeCalcOutScale']
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
_op_real_in_out_name = {
"conv2d": [["Input", "Filter"], ["Output"]],
"conv2d_transpose": [["Input", "Filter"], ["Output"]],
"pool2d": [["X"], ["Out"]],
"elementwise_add": [["X", "Y"], ["Out"]],
"softmax": [["X"], ["Out"]],
"relu": [["X"], ["Out"]],
"relu6": [["X"], ["Out"]],
"leaky_relu": [["X"], ["Out"]],
"prelu": [["X"], ["Out"]],
"tanh": [["X"], ["Out"]],
"batch_norm": [["X"], ["Y"]],
"sigmoid": [["X"], ["Out"]],
"swish": [["X"], ["Out"]],
}
class ImperativeQuantAware(object):
"""
Add the fake quant logic for given quantizable layers, namely add the quant_dequant
computational logic both for activation inputs and weight inputs.
"""
def __init__(self,
weight_bits=8,
activation_bits=8,
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max',
moving_rate=0.9,
quantizable_layer_type=['Conv2D', 'Linear'],
weight_preprocess_layer=None,
act_preprocess_layer=None,
weight_quantize_layer=None,
act_quantize_layer=None):
r"""
The constructor for ImperativeQuantAware.
Args:
weight_bits(int): quantization bit number for weights,
whereas the bias is not quantized.
activation_bits(int): quantization bit number for activations.
weight_quantize_type(str): quantization type for weights,
which supports 'abs_max' now. The 'moving_average_abs_max'
usually is not used for weights, since weights are fixed once the
model is well trained.
activation_quantize_type(str): quantization type for activations,
which supports 'abs_max' and 'moving_average_abs_max' now.
If using 'abs_max' mode, the quantization scale will be calculated
dynamically each step in both training and testing period. If using
'moving_average_abs_max', the static quantization scale will be calculated
during training and used in inference.
moving_rate(float): the parameter for 'moving_average_abs_max' quantization.
quantizable_layer_type(list[str]): List the type of layers that will be quantized.
Default is ['Conv2D', 'Linear']. The quantizable_op_type in
QuantizationFreezePass and ConvertToInt8Pass must be the same as this.
weight_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to preprocess
weight before quantization. Using this can quickly test if user's
preprocess method works or not. The input is non-quantized
weight and function returns processed weight to be quantized.
If None, the weight will be quantized directly. Default is None.
act_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to preprocess
activation before quantization. Using this can quickly test if user's
preprocess method works or not. The input is non-quantized
activation and function returns processed activation to be quantized.
If None, the activation will be quantized directly. Default is None.
weight_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to quantize weight.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
weight and returns dequantized weight. If None, will use
quantization op defined by 'weight_quantize_type'. Default is None.
act_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines how to quantize activation.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
activation and returns dequantized activation. If None, will use
quantization op defined by 'activation_quantize_type'. Default is None.
Note:
If the user sets the attribute 'skip_quant' to True on a Layer that supports dynamic
quantization, that Layer will not be quantized during training. If this attribute is
not set, or is set to False, the Layer will be quantized in training.
Examples 1:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
from paddle.vision.models \
import resnet
model = resnet.resnet50(pretrained=True)
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logic.
# The original model will be rewritten in place.
# The out_scale of outputs in supported layers will be calculated.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./resnet50_qat",
input_spec=[
paddle.static.InputSpec(
shape=[None, 3, 224, 224], dtype='float32')])
Examples 2:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super(ImperativeModel, self).__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
def forward(self, inputs):
x = self.linear_0(inputs)
x = self.linear_1(x)
return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
# Add the fake quant logic.
# The original model will be rewritten in place.
#
# Only one Layer (self.linear_1) will have the fake quant
# logic added, since self.linear_0 sets skip_quant = True.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./imperative_model_qat")
"""
super(ImperativeQuantAware, self).__init__()
self._weight_bits = weight_bits
self._activation_bits = activation_bits
self._moving_rate = moving_rate
self._activation_quantize_type = activation_quantize_type
self._weight_quantize_type = weight_quantize_type
self._weight_pre_layer = weight_preprocess_layer
self._act_pre_layer = act_preprocess_layer
self._weight_quant_layer = weight_quantize_layer
self._act_quant_layer = act_quantize_layer
self._out_scale = ImperativeCalcOutScale()
t_check = lambda method: method is None or issubclass(method, dygraph.layers.Layer)
assert t_check(
self._weight_pre_layer), "weight_preprocess should be nn.Layer"
assert t_check(self._act_pre_layer), "act_preprocess should be nn.Layer"
assert t_check(
self._weight_quant_layer), "weight_quantize should be nn.Layer"
assert t_check(self._act_quant_layer), "act_quantize should be nn.Layer"
quant_type = {
'abs_max', 'moving_average_abs_max', 'channel_wise_abs_max'
}
assert activation_quantize_type != 'channel_wise_abs_max', \
"The activation quantization type does not support 'channel_wise_abs_max'."
if activation_quantize_type not in quant_type:
raise ValueError(
"Unknown activation_quantize_type : '%s'. It can only be "
"'abs_max' or 'moving_average_abs_max' now." %
(str(activation_quantize_type)))
if weight_quantize_type not in quant_type:
raise ValueError(
"Unknown weight_quantize_type: '%s'. It can only be "
"'abs_max' or 'moving_average_abs_max' or 'channel_wise_abs_max' now."
% (str(weight_quantize_type)))
self._quant_layers_map = {
'Conv2D': Conv2D,
'Linear': Linear,
'Pool2D': Pool2D,
'ReLU': ReLU,
'LeakyReLU': LeakyReLU,
'ReLU6': ReLU6,
'Softmax': Softmax,
'Tanh': Tanh,
'Swish': Swish
}
self._quantizable_layer_type = tuple(
self._quant_layers_map[layer]
if layer in self._quant_layers_map else layer
for layer in quantizable_layer_type)
for layer in self._quantizable_layer_type:
assert not isinstance(
layer, str), "{} is unspported to be quantized.".format(layer)
def quantize(self, model):
"""
According to the quantization types of weights and activations, some fake quant ops, such as
fake_quantize_dequantize_moving_average_abs_max and fake_quantize_dequantize_abs_max, will be
added to the model. At the same time, the out_scale values of the outputs will be calculated.
Args:
model(fluid.dygraph.Layer): the model to be quantized.
Returns:
None
"""
for name, layer in model.named_sublayers():
if not isinstance(layer, self._quantizable_layer_type):
continue
if hasattr(layer, "skip_quant") and layer.skip_quant == True:
continue
scopes = name.split('.')
target = scopes[-1]
obj = model
parent = model
for i in range(len(scopes) - 1):
obj = getattr(parent, scopes[i])
parent = obj
quant_layer = self._get_quantized_counterpart(layer)
setattr(quant_layer, "layer_name", layer.full_name())
setattr(obj, target, quant_layer)
self._out_scale.calc_out_scale(model)
def _get_quantized_counterpart(self, layer):
quant_layers = tuple(self._quant_layers_map.values())
quantized_counterpart = tuple('Quantized' + k
for k in self._quant_layers_map.keys())
predicate = lambda value: isinstance(layer, value)
index_generator = (i for i, v in enumerate(quant_layers)
if predicate(v))
try:
index = next(index_generator)
except StopIteration:
_logger.fatal("The layer {} is unsupported to be quantized.".format(
layer.full_name()))
sys.exit(-1)
layer_with_weight = ['QuantizedConv2D', 'QuantizedLinear']
if quantized_counterpart[index] not in layer_with_weight:
quant_layer_class_name = 'QuantizedNoweightLayer'
else:
quant_layer_class_name = quantized_counterpart[index]
quantized_layer = quant_nn.__dict__[quant_layer_class_name](
layer, self._weight_bits, self._activation_bits, self._moving_rate,
self._weight_quantize_type, self._activation_quantize_type,
self._weight_pre_layer, self._act_pre_layer,
self._weight_quant_layer, self._act_quant_layer)
return quantized_layer
def save_quantized_model(self, layer, path, input_spec=None, **config):
self._out_scale.save_quantized_model(layer, path, input_spec, **config)
class ImperativeCalcOutScale(object):
def __init__(self, moving_rate=0.9):
"""
Add the logic of calculating and setting output quantization scales of some layers.
These output quantization scales may be used by tensorRT or some other inference engines.
Args:
moving_rate(float): The decay coefficient of moving average. The default value is 0.9.
"""
super(ImperativeCalcOutScale, self).__init__()
self._moving_rate = moving_rate
self._out_scale_layer_type_list = (
BatchNorm, BatchNorm1D, BatchNorm2D, BatchNorm3D, Conv2D,
Conv2DTranspose, LeakyReLU, Linear, PReLU, Pool2D, MaxPool1D,
MaxPool2D, ReLU, ReLU6, Sigmoid, Softmax, Tanh, Swish)
self._register_hook_handle_list = []
self._out_scale_dict = collections.OrderedDict()
def calc_out_scale(self, model):
"""
Insert the `moving_average_abs_max_scale` op to calculate the output scales of specific layers in the model.
Args:
model(fluid.dygraph.Layer): The target model for which the output quantization scales will be calculated.
Returns:
None
"""
assert isinstance(
model, dygraph.Layer), "model must be the instance of dygraph.Layer"
for _, layer in model.named_sublayers():
if not isinstance(layer, self._out_scale_layer_type_list):
if 'quantized_' not in layer.full_name():
continue
forward_post_hook_handle = layer.register_forward_post_hook(
self._forward_post_hook)
self._register_hook_handle_list.append(forward_post_hook_handle)
def save_quantized_model(self, layer, path, input_spec=None, **config):
"""
Save the quantized model for the inference.
Args:
layer (Layer): The Layer to be saved.
path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
input_spec (list[InputSpec|Tensor], optional): Describes the input of the saved model's forward
method, which can be described by InputSpec or example Tensor. If None, all input variables of
the original Layer's forward method would be the inputs of the saved model. Default None.
**config (dict, optional): Other save configuration options for compatibility. We do not
recommend using these configurations, they may be removed in the future. If not necessary,
DO NOT use them. Default None.
The following options are currently supported:
(1) output_spec (list[Tensor]): Selects the output targets of the saved model.
By default, all return variables of original Layer's forward method are kept as the
output of the saved model. If the provided ``output_spec`` list is not all output variables,
the saved model will be pruned according to the given ``output_spec`` list.
Returns:
None
"""
assert isinstance(
layer, dygraph.Layer), "model must be the instance of dygraph.Layer"
is_dynamic_mode = False
with dygraph.guard():
layer.eval()
for handle in self._register_hook_handle_list:
handle.remove()
for key in self._out_scale_dict:
self._out_scale_dict[key] = float(self._out_scale_dict[key]
.numpy())
if paddle.in_dynamic_mode():
is_dynamic_mode = True
paddle.enable_static()
paddle.jit.save(layer=layer, path=path, input_spec=input_spec, **config)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = Executor(place)
file_prefix = os.path.basename(path)
dirname = os.path.dirname(path)
model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX
[inference_program, feed_target_names, fetch_targets] = (
load_inference_model(
dirname=dirname,
executor=exe,
model_filename=model_filename,
params_filename=params_filename))
# Traverse all ops in the program and find out the op matching
# the Layer in the dynamic graph.
layer_var_dict = {}
ops_list = [key for key, _ in self._out_scale_dict.items()]
op_count = 0
for block in inference_program.blocks:
for op in block.ops:
if op.type in _op_real_in_out_name:
if op.type in ["batch_norm", "pool2d"]:
if op.type == "pool2d" and op.attr(
"pooling_type") != "max":
continue
op_count = self.op_match(op, ops_list, op_count)
if op_count >= len(ops_list):
continue
op._set_attr('out_threshold',
self._out_scale_dict[ops_list[op_count]])
op_count += 1
else:
output_var_names = quantization_pass._get_op_output_var_names(
op)
for output_var_name in output_var_names:
output_var_tensor = block.var(output_var_name)
if output_var_tensor.dtype not in [
core.VarDesc.VarType.FP64,
core.VarDesc.VarType.FP32
]:
continue
# Because the Layer in dygraph may correspond to multiple ops
# in static program after being saved. To ensure correctness,
# the outscale collected for output of dygraph Layer can only
# be set to the last op in the corresponding ops in static program.
#
# We can judge the execution order of the ops which corresponding
# to dygraph Layer by the name of output. And use dict to save
# the corresponding relationship between the dygraph Layer and the
# static graph op that needs to set the outscale attribute.
if '.' not in output_var_name:
continue
dynamic_layer_name, var_name_suffix = output_var_name.split(
".")
if dynamic_layer_name in layer_var_dict:
if layer_var_dict[dynamic_layer_name][
0] < var_name_suffix:
layer_var_dict[dynamic_layer_name] = [
var_name_suffix, op
]
else:
layer_var_dict[dynamic_layer_name] = [
var_name_suffix, op
]
# Because the naming styles of static and dynamic graph are different,
# in order to avoid mistakes, we unify the name here.
for (layer_name, var_name_op_list) in layer_var_dict.items():
if 'prelu' in layer_name:
layer_name = layer_name.replace('prelu', 'p_re_lu')
if 'relu' in layer_name:
layer_name = layer_name.replace('relu', 're_lu')
if layer_name not in self._out_scale_dict:
continue
var_name_op_list[1]._set_attr('out_threshold',
self._out_scale_dict[layer_name])
# Save the processed program.
save_inference_model(
dirname=dirname,
feeded_var_names=feed_target_names,
target_vars=fetch_targets,
executor=exe,
main_program=inference_program.clone(),
model_filename=model_filename,
params_filename=params_filename)
if is_dynamic_mode:
paddle.disable_static()
def op_match(self, op, ops_list, op_count):
while op_count < len(ops_list) and op.type not in ops_list[op_count]:
op_count += 1
while op_count < len(ops_list) and op.type == "pool2d" and op.attr(
"pooling_type") != "max":
op_count += 1
return op_count
def _forward_post_hook(self, layer, input, output):
assert isinstance(
output, (core.VarBase, framework.Variable)
), "Multiple outputs are not currently supported in ImperativeOutScale."
if output.dtype not in [
core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64
]:
return
if not hasattr(layer, "_out_scale"):
layer._out_scale = quant_nn.MovingAverageAbsMaxScale(
output.name, self._moving_rate, output.dtype)
scale_out = layer._out_scale(output)
if hasattr(layer, 'layer_name'):
layer_name = layer.layer_name
else:
layer_name = layer.full_name()
self._out_scale_dict[layer_name] = scale_out
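# Minimal standalone usage sketch for ImperativeCalcOutScale (illustrative only;
# `model` and the save path are placeholders, and ImperativeQuantAware already
# drives this class internally through quantize()/save_quantized_model()):
#
#     calc_out_scale = ImperativeCalcOutScale()
#     calc_out_scale.calc_out_scale(model)   # registers forward post hooks
#     # ... run forward passes so the moving-average output scales get updated ...
#     calc_out_scale.save_quantized_model(
#         layer=model,
#         path="./model_with_out_scale",
#         input_spec=[paddle.static.InputSpec(shape=[None, 3, 224, 224], dtype='float32')])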
| 1.53125 | 2 |
sc/northwind.py | elliotgunn/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 7153 |
import pandas as pd
import sqlite3
from pandas import DataFrame
n_conn = sqlite3.connect('northwind_small.sqlite3')
n_curs = n_conn.cursor()
# What are the ten most expensive items (per unit price) in the database?
query = """
SELECT ProductName, UnitPrice
FROM Product
ORDER BY UnitPrice DESC
LIMIT 10
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What is the average age of an employee at the time of their hiring? (Hint: a
# lot of arithmetic works with dates.)
query = """
SELECT AVG(HireDate-BirthDate)
FROM Employee
"""
n_curs.execute(query)
print(n_curs.fetchall())
# answer: 37.22
# (*Stretch*) How does the average age of employee at hire vary by city?
query = """SELECT City, AVG(HireDate-BirthDate)
FROM Employee
GROUP BY City
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What are the ten most expensive items (per unit price)
# in the database *and* their suppliers?
query = """
SELECT ProductName, UnitPrice, CompanyName
FROM Product as p
JOIN Supplier as s
ON p.SupplierID = s.ID
ORDER BY UnitPrice DESC
LIMIT 10
"""
n_curs.execute(query)
print(n_curs.fetchall())
# What is the largest category (by number of unique products in it)?
query = """
SELECT CategoryName, COUNT(CategoryName)
FROM Category as c
JOIN Product as p
ON c.ID=p.CategoryID
GROUP BY CategoryName
ORDER by COUNT(CategoryName) DESC
"""
n_curs.execute(query)
print(n_curs.fetchall())
# largest category is Confections 13
# (*Stretch*) Who's the employee with the most territories? Use `TerritoryId`
# (not name, region, or other fields) as the unique identifier for territories.
# EMPLOYEE ID 7
query = """
SELECT EmployeeId, TerritoryId, COUNT(DISTINCT TerritoryId)
FROM EmployeeTerritory
GROUP BY EmployeeId
ORDER BY COUNT(DISTINCT TerritoryId) DESC
"""
n_curs.execute(query)
print(n_curs.fetchall())
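# Optional add-on (not part of the original assignment): the pandas import at the
# top can load any of these queries straight into a DataFrame for easier inspection.
employee_territory_df = pd.read_sql_query(query, n_conn)
print(employee_territory_df.head())
n_conn.close()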
| 4.0625 | 4 |
tests/test_app/library/loans/admin.py | Pijuli/django-jazzmin | 972 | 7154 |
from django.contrib import admin
from django.urls import path
from .models import BookLoan, Library
from .views import CustomView
class BookLoanInline(admin.StackedInline):
model = BookLoan
extra = 1
readonly_fields = ("id", "duration")
fields = (
"book",
"imprint",
"status",
"due_back",
"borrower",
"loan_start",
"duration",
)
@admin.register(BookLoan)
class BookLoanAdmin(admin.ModelAdmin):
list_display = ("book", "status", "borrower", "due_back", "id")
list_filter = ("status", "due_back")
autocomplete_fields = ("borrower",)
search_fields = ("book__title",)
readonly_fields = ("id",)
fieldsets = (
(None, {"fields": ("book", "imprint", "id")}),
("Availability", {"fields": ("status", "due_back", "duration", "borrower")}),
)
def get_urls(self):
"""
Add in a custom view to demonstrate extending the admin URL patterns.
"""
urls = super().get_urls()
return urls + [path("custom_view", CustomView.as_view(), name="custom_view")]
def response_change(self, request, obj):
ret = super().response_change(request, obj)
if "reserve" in request.POST:
obj.status = "r"
obj.save()
return ret
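# Note: the "reserve" branch above assumes the change-form template renders an extra
# submit button named "reserve", e.g. (hypothetical template snippet):
# <input type="submit" name="reserve" value="Reserve">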
@admin.register(Library)
class LibraryAdmin(admin.ModelAdmin):
list_display = ("name", "address", "librarian")
| 2.046875 | 2 |
lib/MergeMetabolicAnnotations/utils/CompareAnnotationsUtil.py | jeffkimbrel/MergeMetabolicAnnotations | 1 | 7155 |
import os
import datetime
import logging
import json
import uuid
from installed_clients.WorkspaceClient import Workspace as Workspace
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.annotation_ontology_apiServiceClient import annotation_ontology_api
import MergeMetabolicAnnotations.utils.functions as f
class CompareAnnotationsUtil:
def __init__(self, config):
self.config = config
self.timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
self.callback_url = config['SDK_CALLBACK_URL']
self.scratch = config['scratch']
self.kbr = KBaseReport(self.callback_url)
self.anno_api = annotation_ontology_api()
self.ws_client = Workspace(config["workspace-url"])
def run(self, ctx, params):
get_ontology_results = self.anno_api.get_annotation_ontology_events({
"input_ref": params['genome'],
"workspace-url": self.config["workspace-url"]
})
ontology_selected = f.filter_selected_ontologies(
get_ontology_results, params, workflow="compare")
with open(os.path.join(self.scratch, "get_ontology_dump.json"), 'w') as outfile:
json.dump(ontology_selected, outfile, indent=2)
# make reports
html_reports = []
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
os.mkdir(output_directory)
event_summary = f.get_event_lists(ontology_selected)
html_reports = f.compare_report_stack(html_reports, event_summary, output_directory)
# finalize html reports
report_params = {
'message': '',
'html_links': html_reports,
'direct_html_link_index': 0,
'workspace_name': params['workspace_name'],
'report_object_name': f'compare_annotations_{uuid.uuid4()}'}
report_output = self.kbr.create_extended_report(report_params)
return {'report_name': report_output['name'],
'report_ref': report_output['ref']}
| 1.9375 | 2 |
models/__init__.py | TvSeriesFans/CineMonster | 15 | 7156 | from models.Model import Player, Group, Session, engine
| 1.0625 | 1 |
src/backend/tests/test_game/test_models.py | ToJestKrzysio/TheJungleGame | 0 | 7157 |
from unittest.mock import Mock, patch
import numpy as np
from game.models import ValuePolicyModel
def test_predict():
mask = np.zeros((9, 7, 8), dtype=bool)
mask[1, 2, 3] = 1
mask[6, 6, 6] = 1
tensor_mock = Mock()
policy_tensor = np.zeros((9, 7, 8), dtype=float)
policy_tensor[0, 0, 0] = 10
policy_tensor[1, 2, 3] = 100
policy_tensor[6, 6, 6] = 100
policy_tensor = policy_tensor.reshape(-1)
value = np.array([[0.7]], dtype=float)
get_prediction_mock = Mock(return_value=(value, policy_tensor))
network_mock = Mock(spec=ValuePolicyModel, output_shape=(9, 7, 8),
input_shape=(9, 7, 178), _get_prediction=get_prediction_mock)
result_value, result_policy = ValuePolicyModel.predict(
self=network_mock, tensor=tensor_mock, mask=mask)
get_prediction_mock.assert_called_once_with(tensor_mock)
expected_value = 0.7
expected_policy = np.zeros((9, 7, 8))
expected_policy[1, 2, 3] = 0.5
expected_policy[6, 6, 6] = 0.5
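# Only the masked (legal) positions survive and are renormalized: both allowed moves
# have raw policy 100, so each gets probability 0.5, while the unmasked value at
# (0, 0, 0) is discarded.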
assert isinstance(result_value, float)
assert result_value == expected_value
assert np.array_equal(result_policy, expected_policy)
| 2.59375 | 3 |
sina_spider/items.py | yanwen0614/Weibo | 0 | 7158 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class TweetsItem(Item):
# define the fields for your item here like:
Author = Field()
Title = Field()
Create_time = Field()
Id = Field()
Context = Field()
Source = Field()
Url = Field()
class TopicItem(Item):
Url = Field()
Title = Field()
Category = Field()
context = Field()
Id = Field()
Hotlevel = Field()
Time = Field()
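# Example (illustrative): spiders populate items like dictionaries, e.g.
#
#     item = TweetsItem()
#     item['Author'] = 'some_user'
#     item['Context'] = 'weibo text'
#     dict(item)  # -> {'Author': 'some_user', 'Context': 'weibo text'}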
def main():
item = TopicItem()
pass
if __name__ == '__main__':
main() | 2.578125 | 3 |
emission/core/wrapper/client.py | Andrew-Tan/e-mission-server | 0 | 7159 |
import json
import logging
import dateutil.parser
from datetime import datetime
# Our imports
from emission.core.get_database import get_profile_db, get_client_db, get_pending_signup_db
import emission.clients.common
class Client:
def __init__(self, clientName):
# TODO: write background process to ensure that there is only one client with each name
# Maybe clean up unused clients?
self.clientName = clientName
self.settings_filename = "emission/clients/%s/settings.json" % self.clientName
self.__reload()
def __reload(self):
self.clientJSON = None
if self.clientName is not None:
self.clientJSON = get_client_db().find_one({'name': self.clientName})
# clientJSON can be None if we are creating an entry for the first time
if self.clientJSON is None:
# Avoid Attribute error while trying to determine whether the client is active
self.startDatetime = None
self.endDatetime = None
else:
# Do eagerly or lazily? Can also do super lazily and have
self.startDatetime = dateutil.parser.parse(self.clientJSON['start_date'])
self.endDatetime = dateutil.parser.parse(self.clientJSON['end_date'])
def isActive(self, now):
logging.debug("Comparing %s to %s and %s" % (now, self.startDatetime, self.endDatetime))
if self.startDatetime is None:
return False
else:
if self.startDatetime > now:
# Study has not yet started
return False
else:
if self.endDatetime is None:
# Study has no end time
return True
else:
if self.endDatetime > now:
# study has not yet ended
return True
else:
# study has already ended
return False
# Smart settings call, which returns the override settings if the client is
# active, and an empty dict otherwise
def getSettings(self):
if (self.isActive(datetime.now())):
logging.debug("For client %s, returning settings %s" % (self.clientName, self.clientJSON['client_settings']))
return self.clientJSON['client_settings']
else:
# Returning empty dict instead of None to make the client code, which
# will want to merge this, easier
logging.debug("For client %s, active = false, returning {}" % (self.clientName))
return {}
def getDates(self):
return (self.startDatetime, self.endDatetime)
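# Typical read-only usage (sketch; "carshare" is a placeholder client name):
#
#     client = Client("carshare")
#     if client.isActive(datetime.now()):
#         settings = client.getSettings()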
# Figure out if the JSON object here should always be passed in
# Having it be passed in is a lot more flexible
# Let's compromise for now by passing it in and seeing how much of a hassle it is
# That will also ensure that the update_client script is not a complete NOP
def __update(self, newEntry):
get_client_db().update({'name': self.clientName}, newEntry, upsert = True)
self.__reload()
def update(self, createKey = True):
import uuid
newEntry = json.load(open(self.settings_filename))
if createKey:
newEntry['key'] = str(uuid.uuid4())
# logging.info("Updating with new entry %s" % newEntry)
self.__update(newEntry)
return newEntry['key']
def __loadModule(self):
import importlib
clientModule = importlib.import_module("emission.clients.%s.%s" % (self.clientName, self.clientName))
return clientModule
def callMethod(self, methodName, request):
clientModule = self.__loadModule()
logging.debug("called client with %s %s" % (self.clientName, methodName))
# import clients.carshare.carshare as clientModule
method = getattr(clientModule, methodName)
logging.debug("Invoking %s on module %s" % (method, clientModule))
return method(request)
def getClientKey(self):
if self.clientJSON is None:
return None
logging.debug("About to return %s from JSON %s" % (self.clientJSON['key'], self.clientJSON))
return self.clientJSON['key']
def __validateKey(self, clientKey):
if (not self.isActive(datetime.now())):
logging.info("Client %s is not yet active, so key %s is not valid" %
(self.clientName, clientKey))
return False
client_key = self.getClientKey()
if client_key == clientKey:
return True
else:
logging.info("For client %s, incoming key %s does not match stored key %s!" %
(self.clientName, clientKey, client_key))
return False
# What should we do if a user registers again after they have installed the app?
# Options are:
# - NOP
# - update the study field
# - Return error
# For now, we update the study field, pending discussions with Maita on error reporting
# What should we do if a user registers for a study after having installed
# the app or having participated in a different study?
# - add the study to the list of registered studies (but we don't support multiple studies!)
# - update the registered study
# - return an error
# For now, we update the registered study since it makes life easiest for us
# TODO: Figure out what to do here
# Also, note that always inserting it is also fine if we move to an eventual
# consistency model, since we will eventually clean it up again. The end
# result will still be a NOP, though
def __preRegister(self, userEmail):
from emission.core.wrapper.user import User
from emission.analysis.result import userclient
if User.isRegistered(userEmail):
User.fromEmail(userEmail).setStudy(self.clientName)
else:
pendingDoc = {
'user_email': userEmail,
'study': self.clientName,
'last_update': datetime.now()}
# Should I do insert or upsert here? If a user has pre-registered for one
# study and then pre-registers for another study before registering, do we
# want to throw an error or just update silently?
# Update silently for now
writeResult = get_pending_signup_db().update({'user_email': userEmail}, pendingDoc, upsert=True)
logging.debug("in __preRegister, writeResult = %s" % writeResult)
if 'err' in writeResult and writeResult['err'] is not None:
e = Exception()
e.code = writeResult['err'][0]["code"]
e.msg = writeResult['err'][0]["errmsg"]
raise e
return (get_pending_signup_db().find({'study': self.clientName}).count(),
userclient.countForStudy(self.clientName))
def preRegister(self, clientKey, userEmail):
if not self.__validateKey(clientKey):
e = Exception()
e.code = 403
e.msg = "This is not the client key for your study, or your study has already ended. Please contact <EMAIL> to obtain a client key, or restart your study"
raise e
return self.__preRegister(userEmail)
def __callJavascriptCallback(self, methodName, params):
if self.isActive(datetime.now()):
clientModule = self.__loadModule()
method = getattr(clientModule, methodName)
return method(params)
else:
return None
def callJavascriptCallback(self, clientKey, method, request):
if not self.__validateKey(clientKey):
e = Exception()
e.code = 403
e.msg = "This is not the client key for your study, or your study has already ended. Please contact <EMAIL> to obtain a client key, or restart your study"
raise e
return self.__callJavascriptCallback(method, request)
# BEGIN: Standard customization hooks
def getClientConfirmedModeQuery(self, mode):
if self.isActive(datetime.now()):
clientModeField = self.getClientConfirmedModeField()
return {clientModeField: mode}
else:
return {}
def getClientConfirmedModeField(self):
if self.isActive(datetime.now()):
clientModule = self.__loadModule()
return clientModule.getClientConfirmedModeField()
else:
return None
def getSectionFilter(self, uuid):
if self.isActive(datetime.now()):
return self.__loadModule().getSectionFilter(uuid)
else:
return []
def getResult(self, uuid):
if self.isActive(datetime.now()):
return self.__loadModule().getResult(uuid)
else:
return None
def clientSpecificSetters(self, uuid, sectionId, predictedModeMap):
if self.isActive(datetime.now()):
return self.__loadModule().clientSpecificSetters(uuid, sectionId, predictedModeMap)
else:
return None
def runBackgroundTasks(self, uuid):
if self.isActive(datetime.now()):
self.__loadModule().runBackgroundTasks(uuid)
else:
logging.debug("Client is not active, skipping call...")
# END: Standard customization hooks
# This reads the combined set of queries from all clients
# Read the design decisions for an example of how to improve this
@staticmethod
def getClientConfirmedModeQueries(mode):
queryList = emission.clients.common.getConfirmFields()
queryListWithMode = [{query: mode} for query in queryList]
return [{'$or': queryListWithMode}]
@staticmethod
def getPendingClientRegs(userName):
studyList = []
userEmailQuery = {'user_email': userName}
pendingReg = get_pending_signup_db().find_one(userEmailQuery)
if pendingReg != None:
studyList = [pendingReg['study']]
return studyList
@staticmethod
def deletePendingClientRegs(userName):
userEmailQuery = {'user_email': userName}
get_pending_signup_db().remove(userEmailQuery)
| 2.3125 | 2 |
setup.py | karianjahi/advent_of_code | 0 | 7160 |
import setuptools
setuptools.setup(name='advent_of_code') | 0.792969 | 1 |
src/csvutils.py | imco/nmx | 0 | 7161 |
def escapeQuotes(string):
return string.replace('"', '""')
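# Example: escapeQuotes('say "hi"') -> 'say ""hi""' (quotes doubled for CSV fields)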
| 1.851563 | 2 |
test/sanity_import_vpp_papi.py | amithbraj/vpp | 751 | 7162 | #!/usr/bin/env python3
""" sanity check script """
import vpp_papi
| 1.132813 | 1 |
examples/labs/demo_dmtx.py | yarikoptic/nipy | 0 | 7163 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Examples of design matrices specification and and computation (event-related
design, FIR design, etc)
Requires matplotlib
Author : <NAME>: 2009-2010
"""
print(__doc__)
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.modalities.fmri.design_matrix import make_dmtx
from nipy.modalities.fmri.experimental_paradigm import (EventRelatedParadigm,
BlockParadigm)
# frame times
tr = 1.0
nscans = 128
frametimes = np.linspace(0, (nscans - 1) * tr, nscans)
# experimental paradigm
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
hrf_model = 'canonical'
motion = np.cumsum(np.random.randn(128, 6), 0)
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
#event-related design matrix
paradigm = EventRelatedParadigm(conditions, onsets)
X1 = make_dmtx(
frametimes, paradigm, drift_model='polynomial', drift_order=3,
add_regs=motion, add_reg_names=add_reg_names)
# block design matrix
duration = 7 * np.ones(9)
paradigm = BlockParadigm(con_id=conditions, onset=onsets,
duration=duration)
X2 = make_dmtx(frametimes, paradigm, drift_model='polynomial',
drift_order=3)
# FIR model
paradigm = EventRelatedParadigm(conditions, onsets)
hrf_model = 'FIR'
X3 = make_dmtx(frametimes, paradigm, hrf_model='fir',
drift_model='polynomial', drift_order=3,
fir_delays=np.arange(1, 6))
# plot the results
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot(1, 3, 1)
X1.show(ax=ax)
ax.set_title('Event-related design matrix', fontsize=12)
ax = plt.subplot(1, 3, 2)
X2.show(ax=ax)
ax.set_title('Block design matrix', fontsize=12)
ax = plt.subplot(1, 3, 3)
X3.show(ax=ax)
ax.set_title('FIR design matrix', fontsize=12)
plt.subplots_adjust(top=0.9, bottom=0.25)
plt.show()
| 2.453125 | 2 |
fbpcs/private_computation/test/service/test_private_computation.py | yelixu2/fbpcs | 0 | 7164 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from collections import defaultdict
from typing import List, Optional, Tuple
from unittest.mock import MagicMock, call, patch
from fbpcp.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcp.service.mpc import MPCInstanceStatus, MPCParty, MPCService
from fbpcp.service.onedocker import OneDockerService
from fbpcs.common.entity.pcs_mpc_instance import PCSMPCInstance
from fbpcs.data_processing.lift_id_combiner.lift_id_spine_combiner_cpp import (
CppLiftIdSpineCombinerService,
)
from fbpcs.data_processing.sharding.sharding_cpp import CppShardingService
from fbpcs.onedocker_binary_config import OneDockerBinaryConfig
from fbpcs.onedocker_binary_names import OneDockerBinaryNames
from fbpcs.onedocker_service_config import OneDockerServiceConfig
from fbpcs.pcf.tests.async_utils import to_sync
from fbpcs.pid.entity.pid_instance import (
PIDInstance,
PIDInstanceStatus,
PIDProtocol,
PIDRole,
)
from fbpcs.pid.service.pid_service.pid import PIDService
from fbpcs.private_computation.entity.private_computation_instance import (
PrivateComputationGameType,
PrivateComputationInstance,
PrivateComputationInstanceStatus,
PrivateComputationRole,
UnionedPCInstance,
)
from fbpcs.private_computation.entity.private_computation_stage_type import (
PrivateComputationStageType,
)
from fbpcs.private_computation.repository.private_computation_game import GameNames
from fbpcs.private_computation.service.errors import (
PrivateComputationServiceValidationError,
)
from fbpcs.private_computation.service.private_computation import (
PrivateComputationService,
NUM_NEW_SHARDS_PER_FILE,
DEFAULT_K_ANONYMITY_THRESHOLD,
)
from fbpcs.private_computation.service.private_computation_stage_service import (
PrivateComputationStageService,
)
# TODO T94666166: libfb won't work in OSS
from libfb.py.asyncio.mock import AsyncMock
from libfb.py.testutil import data_provider
from fbpcs.private_computation.service.utils import (
create_and_start_mpc_instance,
gen_mpc_game_args_to_retry,
map_private_computation_role_to_mpc_party,
DEFAULT_CONTAINER_TIMEOUT_IN_SEC,
)
def _get_valid_stages_data() -> List[Tuple[PrivateComputationStageType]]:
return [
(PrivateComputationStageType.ID_MATCH,),
(PrivateComputationStageType.COMPUTE,),
(PrivateComputationStageType.AGGREGATE,),
(PrivateComputationStageType.POST_PROCESSING_HANDLERS,),
]
class TestPrivateComputationService(unittest.TestCase):
def setUp(self):
container_svc_patcher = patch("fbpcp.service.container_aws.AWSContainerService")
storage_svc_patcher = patch("fbpcp.service.storage_s3.S3StorageService")
mpc_instance_repo_patcher = patch(
"fbpcs.common.repository.mpc_instance_local.LocalMPCInstanceRepository"
)
pid_instance_repo_patcher = patch(
"fbpcs.pid.repository.pid_instance_local.LocalPIDInstanceRepository"
)
private_computation_instance_repo_patcher = patch(
"fbpcs.private_computation.repository.private_computation_instance_local.LocalPrivateComputationInstanceRepository"
)
mpc_game_svc_patcher = patch("fbpcp.service.mpc_game.MPCGameService")
container_svc = container_svc_patcher.start()
storage_svc = storage_svc_patcher.start()
mpc_instance_repository = mpc_instance_repo_patcher.start()
pid_instance_repository = pid_instance_repo_patcher.start()
private_computation_instance_repository = (
private_computation_instance_repo_patcher.start()
)
mpc_game_svc = mpc_game_svc_patcher.start()
for patcher in (
container_svc_patcher,
storage_svc_patcher,
mpc_instance_repo_patcher,
pid_instance_repo_patcher,
private_computation_instance_repo_patcher,
mpc_game_svc_patcher,
):
self.addCleanup(patcher.stop)
self.onedocker_service_config = OneDockerServiceConfig(
task_definition="test_task_definition",
)
self.onedocker_binary_config_map = defaultdict(
lambda: OneDockerBinaryConfig(
tmp_directory="/test_tmp_directory/", binary_version="latest"
)
)
self.onedocker_service = OneDockerService(
container_svc, self.onedocker_service_config.task_definition
)
self.mpc_service = MPCService(
container_svc=container_svc,
instance_repository=mpc_instance_repository,
task_definition="test_task_definition",
mpc_game_svc=mpc_game_svc,
)
self.pid_service = PIDService(
instance_repository=pid_instance_repository,
storage_svc=storage_svc,
onedocker_svc=self.onedocker_service,
onedocker_binary_config_map=self.onedocker_binary_config_map,
)
self.private_computation_service = PrivateComputationService(
instance_repository=private_computation_instance_repository,
mpc_svc=self.mpc_service,
pid_svc=self.pid_service,
onedocker_svc=self.onedocker_service,
onedocker_binary_config_map=self.onedocker_binary_config_map,
)
self.test_private_computation_id = "test_private_computation_id"
self.test_num_containers = 2
self.test_input_path = "in_path"
self.test_output_dir = "out_dir"
self.test_game_type = PrivateComputationGameType.LIFT
self.test_concurrency = 1
def test_create_instance(self):
test_role = PrivateComputationRole.PUBLISHER
self.private_computation_service.create_instance(
instance_id=self.test_private_computation_id,
role=test_role,
game_type=self.test_game_type,
input_path=self.test_input_path,
output_dir=self.test_output_dir,
num_pid_containers=self.test_num_containers,
num_mpc_containers=self.test_num_containers,
concurrency=self.test_concurrency,
num_files_per_mpc_container=NUM_NEW_SHARDS_PER_FILE,
)
# check instance_repository.create is called with the correct arguments
self.private_computation_service.instance_repository.create.assert_called()
args = self.private_computation_service.instance_repository.create.call_args[0][
0
]
self.assertEqual(self.test_private_computation_id, args.instance_id)
self.assertEqual(test_role, args.role)
self.assertEqual(PrivateComputationInstanceStatus.CREATED, args.status)
def test_update_instance(self):
test_pid_id = self.test_private_computation_id + "_id_match"
test_pid_protocol = PIDProtocol.UNION_PID
test_pid_role = PIDRole.PUBLISHER
test_input_path = "pid_in"
test_output_path = "pid_out"
# create one PID instance to be put into PrivateComputationInstance
pid_instance = PIDInstance(
instance_id=test_pid_id,
protocol=test_pid_protocol,
pid_role=test_pid_role,
num_shards=self.test_num_containers,
input_path=test_input_path,
output_path=test_output_path,
status=PIDInstanceStatus.STARTED,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.ID_MATCHING_STARTED,
instances=[pid_instance],
)
updated_pid_instance = pid_instance
updated_pid_instance.status = PIDInstanceStatus.COMPLETED
self.private_computation_service.pid_svc.update_instance = MagicMock(
return_value=updated_pid_instance
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# call update on the PrivateComputationInstance
updated_instance = self.private_computation_service.update_instance(
instance_id=self.test_private_computation_id
)
# check update instance called on the right pid instance
self.private_computation_service.pid_svc.update_instance.assert_called()
self.assertEqual(
test_pid_id,
self.private_computation_service.pid_svc.update_instance.call_args[0][0],
)
# check update instance called on the right private lift instance
self.private_computation_service.instance_repository.update.assert_called()
self.assertEqual(
private_computation_instance,
self.private_computation_service.instance_repository.update.call_args[0][0],
)
# check updated_instance has new status
self.assertEqual(
PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
updated_instance.status,
)
# create one MPC instance to be put into PrivateComputationInstance
test_mpc_id = "test_mpc_id"
mpc_instance = PCSMPCInstance.create_instance(
instance_id=test_mpc_id,
game_name=GameNames.LIFT.value,
mpc_party=MPCParty.SERVER,
num_workers=2,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_STARTED,
instances=[mpc_instance],
)
updated_mpc_instance = mpc_instance
updated_mpc_instance.status = MPCInstanceStatus.COMPLETED
self.private_computation_service.mpc_svc.update_instance = MagicMock(
return_value=updated_mpc_instance
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# call update on the PrivateComputationInstance
updated_instance = self.private_computation_service.update_instance(
instance_id=self.test_private_computation_id
)
# check update instance called on the right mpc instance
self.private_computation_service.mpc_svc.update_instance.assert_called()
self.assertEqual(
test_mpc_id,
self.private_computation_service.mpc_svc.update_instance.call_args[0][0],
)
# check update instance called on the right private lift instance
self.private_computation_service.instance_repository.update.assert_called()
self.assertEqual(
private_computation_instance,
self.private_computation_service.instance_repository.update.call_args[0][0],
)
# check updated_instance has new status
self.assertEqual(
PrivateComputationInstanceStatus.COMPUTATION_COMPLETED,
updated_instance.status,
)
@staticmethod
def _get_dummy_stage_svc(
stage_type: PrivateComputationStageType,
) -> PrivateComputationStageService:
"""create a DummyTestStageService class and instantiate an instance of it"""
return type(
"DummyTestStageService",
(PrivateComputationStageService,),
{
"run_async": AsyncMock(
# run_async will return whatever pc_instance privatelift.run_stage passes it
side_effect=lambda pc_instance, *args, **kwargs: pc_instance
),
"stage_type": stage_type,
},
)()
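# Creating the class on the fly with type() gives each test a throwaway
# PrivateComputationStageService whose stage_type is parameterized and whose
# run_async simply echoes back the instance it receives.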
@data_provider(_get_valid_stages_data)
def test_run_stage_correct_stage_order(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage runs stage_svc when the stage_svc is the next stage in the sequence
"""
################# PREVIOUS STAGE COMPLETED OR RETRY #######################
stage_svc = self._get_dummy_stage_svc(stage_type)
for status in (
stage_type.previous_stage.completed_status,
stage_type.failed_status,
):
pl_instance = self.create_sample_instance(status=status)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc
)
self.assertEqual(pl_instance.status, stage_type.start_status)
@data_provider(_get_valid_stages_data)
def test_run_stage_status_already_started(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage does not run stage_svc when the instance status is already started
"""
################# CURRENT STAGE STATUS NOT VALID #######################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(status=stage_type.start_status)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
with self.assertRaises(ValueError):
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc
)
@data_provider(_get_valid_stages_data)
def test_run_stage_out_of_order_with_dry_run(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage runs stage_svc out of order when dry run is passed
"""
################ STAGE OUT OF ORDER WITH DRY RUN #####################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.UNKNOWN
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc, dry_run=True
)
self.assertEqual(pl_instance.status, stage_type.start_status)
@data_provider(_get_valid_stages_data)
def test_run_stage_out_of_order_without_dry_run(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage does not run stage_svc out of order when dry run is not passed
"""
####################### STAGE OUT OF ORDER NO DRY RUN ############################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.UNKNOWN
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
with self.assertRaises(ValueError):
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc, dry_run=False
)
@data_provider(_get_valid_stages_data)
def test_run_stage_partner_no_server_ips(
self, stage_type: PrivateComputationStageType
) -> None:
"""
tests that run_stage does not run stage_svc if the role is partner and no server ips are specified
"""
####################### PARTNER NO SERVER IPS ############################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(
status=stage_type.previous_stage.completed_status,
role=PrivateComputationRole.PARTNER,
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
with self.assertRaises(ValueError):
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc
)
@data_provider(_get_valid_stages_data)
def test_run_stage_fails(self, stage_type: PrivateComputationStageType) -> None:
"""
tests that statuses are set properly when a run fails
"""
######################### STAGE FAILS ####################################
stage_svc = self._get_dummy_stage_svc(stage_type)
pl_instance = self.create_sample_instance(
status=stage_type.previous_stage.completed_status
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=pl_instance
)
# create a custom exception class to make sure we have a unique exception for the test
stage_failure_exception = type("TestStageFailureException", (Exception,), {})
stage_svc.run_async = AsyncMock(side_effect=stage_failure_exception())
with self.assertRaises(stage_failure_exception):
pl_instance = self.private_computation_service.run_stage(
pl_instance.instance_id, stage_svc
)
self.assertEqual(pl_instance.status, stage_type.failed_status)
def test_partner_missing_server_ips(self):
test_private_computation_id = "test_private_computation_id"
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# exception because role is partner but server ips are not given
with self.assertRaises(ValueError):
self.private_computation_service.aggregate_shards(
instance_id=test_private_computation_id,
)
@patch("fbpcp.service.mpc.MPCService")
@patch(
"fbpcs.private_computation.service.private_computation.create_and_start_mpc_instance"
)
def test_aggregate_shards(self, mock_create_and_start_mpc_instance, mock_mpc_svc):
# construct a private_computation_instance with an mpc_instance handling metrics computation
test_mpc_id = self.test_private_computation_id + "_compute_metrics"
mpc_instance = PCSMPCInstance.create_instance(
instance_id=test_mpc_id,
game_name=GameNames.LIFT.value,
mpc_party=MPCParty.SERVER,
num_workers=self.test_num_containers,
status=MPCInstanceStatus.COMPLETED,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_COMPLETED,
instances=[mpc_instance],
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
mock_mpc_svc.update_instance = MagicMock(return_value=mpc_instance)
# call aggregate_shards
self.private_computation_service.aggregate_shards(
instance_id=self.test_private_computation_id,
server_ips=["192.0.2.0", "192.0.2.1"],
)
test_game_args = [
{
"input_base_path": private_computation_instance.compute_stage_output_base_path,
"metrics_format_type": "lift",
"num_shards": self.test_num_containers * NUM_NEW_SHARDS_PER_FILE,
"output_path": private_computation_instance.shard_aggregate_stage_output_path,
"threshold": private_computation_instance.k_anonymity_threshold,
"run_name": "",
}
]
# check a new MPC instance handling metrics aggregation was to be created
self.assertEqual(
GameNames.SHARD_AGGREGATOR.value,
mock_create_and_start_mpc_instance.call_args[1]["game_name"],
)
self.assertEqual(
test_game_args,
mock_create_and_start_mpc_instance.call_args[1]["game_args"],
)
self.private_computation_service.instance_repository.update.assert_called()
self.assertEqual(
PrivateComputationInstanceStatus.AGGREGATION_STARTED,
private_computation_instance.status,
)
@patch("fbpcp.service.mpc.MPCService")
@patch(
"fbpcs.private_computation.service.private_computation.create_and_start_mpc_instance"
)
def test_aggregate_shards_rerun(
self, mock_create_and_start_mpc_instance, mock_mpc_svc
):
# construct a private_computation_instance
test_private_computation_id = "test_private_computation_id"
mpc_instance = PCSMPCInstance.create_instance(
instance_id=test_private_computation_id + "_aggregate_shards",
game_name=GameNames.SHARD_AGGREGATOR.value,
mpc_party=MPCParty.SERVER,
num_workers=2,
status=MPCInstanceStatus.FAILED,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.AGGREGATION_FAILED,
instances=[mpc_instance],
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
mock_mpc_svc.update_instance = MagicMock(return_value=mpc_instance)
# call aggregate_shards
self.private_computation_service.aggregate_shards(
instance_id=test_private_computation_id,
server_ips=["192.0.2.0", "192.0.2.1"],
)
# check that the retry counter has been incremented
self.assertEqual(private_computation_instance.retry_counter, 1)
# check a new MPC instance handling metrics aggregation was to be created
self.assertEqual(2, len(private_computation_instance.instances))
self.assertEqual(
test_private_computation_id + "_aggregate_shards1",
mock_create_and_start_mpc_instance.call_args[1]["instance_id"],
)
self.assertEqual(
PrivateComputationInstanceStatus.AGGREGATION_STARTED,
private_computation_instance.status,
)
@patch("fbpcp.service.mpc.MPCService")
@patch(
"fbpcs.private_computation.service.private_computation.create_and_start_mpc_instance"
)
def test_aggregate_shards_dry_run(
self, mock_create_and_start_mpc_instance, mock_mpc_svc
):
# construct a private_computation_instance
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_FAILED,
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# call aggregate_shards with ad-hoc input_path and num_shards
test_format_type = "lift"
test_game_args = [
{
"input_base_path": private_computation_instance.compute_stage_output_base_path,
"num_shards": self.test_num_containers * NUM_NEW_SHARDS_PER_FILE,
"metrics_format_type": test_format_type,
"output_path": private_computation_instance.shard_aggregate_stage_output_path,
"threshold": private_computation_instance.k_anonymity_threshold,
"run_name": "",
}
]
self.private_computation_service.aggregate_shards(
instance_id=self.test_private_computation_id,
server_ips=["192.0.2.0", "192.0.2.1"],
dry_run=True,
)
# check a new MPC instance handling metrics aggregation was to be created
# with the overwritten input_path and num_shards
self.assertEqual(
GameNames.SHARD_AGGREGATOR.value,
mock_create_and_start_mpc_instance.call_args[1]["game_name"],
)
self.assertEqual(
test_game_args,
mock_create_and_start_mpc_instance.call_args[1]["game_args"],
)
self.private_computation_service.instance_repository.update.assert_called()
self.assertEqual(
PrivateComputationInstanceStatus.AGGREGATION_STARTED,
private_computation_instance.status,
)
@to_sync
@patch("fbpcp.service.mpc.MPCService")
async def test_create_and_start_mpc_instance(self, mock_mpc_svc):
mock_mpc_svc.create_instance = MagicMock()
mock_mpc_svc.start_instance_async = AsyncMock()
instance_id = "test_instance_id"
game_name = GameNames.LIFT.value
mpc_party = MPCParty.CLIENT
num_containers = 4
input_file = "input_file"
output_file = "output_file"
input_directory = "input_directory"
output_directory = "output_directory"
server_ips = ["192.0.2.0", "192.0.2.1"]
game_args = {
"input_filenames": input_file,
"input_directory": input_directory,
"output_filenames": output_file,
"output_directory": output_directory,
"concurrency": 1,
}
binary_version = self.onedocker_binary_config_map[
OneDockerBinaryNames.LIFT_COMPUTE.value
].binary_version
await create_and_start_mpc_instance(
mpc_svc=mock_mpc_svc,
instance_id=instance_id,
game_name=game_name,
mpc_party=mpc_party,
num_containers=num_containers,
binary_version=binary_version,
container_timeout=DEFAULT_CONTAINER_TIMEOUT_IN_SEC,
server_ips=server_ips,
game_args=game_args,
)
# check create_instance and start_instance were called with the right parameters
self.assertEqual(
call(
instance_id=instance_id,
game_name=game_name,
mpc_party=mpc_party,
num_workers=num_containers,
game_args=game_args,
),
mock_mpc_svc.create_instance.call_args,
)
self.assertEqual(
call(
instance_id=instance_id,
server_ips=server_ips,
timeout=DEFAULT_CONTAINER_TIMEOUT_IN_SEC,
version=binary_version,
),
mock_mpc_svc.start_instance_async.call_args,
)
def test_map_private_computation_role_to_mpc_party(self):
self.assertEqual(
MPCParty.SERVER,
map_private_computation_role_to_mpc_party(PrivateComputationRole.PUBLISHER),
)
self.assertEqual(
MPCParty.CLIENT,
map_private_computation_role_to_mpc_party(PrivateComputationRole.PARTNER),
)
def test_get_status_from_stage(self):
# Test get status from an MPC stage
mpc_instance = PCSMPCInstance.create_instance(
instance_id="test_mpc_id",
game_name=GameNames.SHARD_AGGREGATOR.value,
mpc_party=MPCParty.SERVER,
num_workers=2,
status=MPCInstanceStatus.FAILED,
)
self.assertEqual(
PrivateComputationInstanceStatus.AGGREGATION_FAILED,
self.private_computation_service._get_status_from_stage(mpc_instance),
)
# Test get status from the PID stage
pid_instance = PIDInstance(
instance_id="test_pid_id",
protocol=PIDProtocol.UNION_PID,
pid_role=PIDRole.PUBLISHER,
num_shards=4,
input_path="input",
output_path="output",
stages_containers={},
stages_status={},
status=PIDInstanceStatus.COMPLETED,
)
self.assertEqual(
PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED,
self.private_computation_service._get_status_from_stage(pid_instance),
)
def test_prepare_data(self):
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.CREATED,
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
with patch.object(
CppLiftIdSpineCombinerService,
"combine_on_container_async",
) as mock_combine, patch.object(
CppShardingService,
"shard_on_container_async",
) as mock_shard:
# call prepare_data
self.private_computation_service.prepare_data(
instance_id=self.test_private_computation_id,
dry_run=True,
)
binary_config = self.onedocker_binary_config_map[
OneDockerBinaryNames.LIFT_ID_SPINE_COMBINER.value
]
mock_combine.assert_called_once_with(
spine_path=private_computation_instance.pid_stage_output_spine_path,
data_path=private_computation_instance.pid_stage_output_data_path,
output_path=private_computation_instance.data_processing_output_path
+ "_combine",
num_shards=self.test_num_containers,
onedocker_svc=self.onedocker_service,
binary_version=binary_config.binary_version,
tmp_directory=binary_config.tmp_directory,
)
mock_shard.assert_called()
def test_prepare_data_tasks_skipped(self):
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_FAILED,
)
private_computation_instance.partial_container_retry_enabled = True
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
with patch.object(
CppLiftIdSpineCombinerService,
"combine_on_container_async",
) as mock_combine, patch.object(
CppShardingService,
"shard_on_container_async",
) as mock_shard:
# call prepare_data
self.private_computation_service.prepare_data(
instance_id=self.test_private_computation_id,
)
# expect combining and sharding skipped because this private_computation_instance has
# status PrivateComputationInstanceStatus.COMPUTATION_FAILED, so this run
# is to recover from a previous compute metrics failure, meaning data
# preparation should have been done
mock_combine.assert_not_called()
mock_shard.assert_not_called()
def test_validate_metrics_results_doesnt_match(self):
self.private_computation_service.pid_svc.storage_svc.read = MagicMock()
self.private_computation_service.pid_svc.storage_svc.read.side_effect = [
'{"subGroupMetrics":[],"metrics":{"controlClicks":1,"testSpend":0,"controlImpressions":0,"testImpressions":0,"controlMatchCount":0,"testMatchCount":0,"controlNumConvSquared":0,"testNumConvSquared":0,"testValueSquared":0,"controlValue":0,"testValue":0,"testConverters":0,"testConversions":0,"testPopulation":0,"controlClickers":0,"testClickers":0,"controlReach":0,"testReach":0,"controlSpend":0,"testClicks":0,"controlValueSquared":0,"controlConverters":0,"controlConversions":0,"controlPopulation":0}}',
'{"subGroupMetrics":[],"metrics":{"testSpend":0,"controlClicks":0,"controlImpressions":0,"testImpressions":0,"controlMatchCount":0,"testMatchCount":0,"controlNumConvSquared":0,"testNumConvSquared":0,"testValueSquared":0,"controlValue":0,"testValue":0,"testConverters":0,"testConversions":0,"testPopulation":0,"controlClickers":0,"testClickers":0,"controlReach":0,"testReach":0,"controlSpend":0,"testClicks":0,"controlValueSquared":0,"controlConverters":0,"controlConversions":0,"controlPopulation":0}}',
]
with self.assertRaises(PrivateComputationServiceValidationError):
self.private_computation_service.validate_metrics(
instance_id="test_id",
aggregated_result_path="aggregated_result_path",
expected_result_path="expected_result_path",
)
def test_cancel_current_stage(self):
test_mpc_id = self.test_private_computation_id + "_compute_metrics"
test_game_name = GameNames.LIFT.value
test_mpc_party = MPCParty.CLIENT
# prepare the pl instance that will be read in to memory from the repository
# at the beginning of the cancel_current_stage function
mpc_instance_started = PCSMPCInstance.create_instance(
instance_id=test_mpc_id,
game_name=test_game_name,
mpc_party=test_mpc_party,
num_workers=self.test_num_containers,
status=MPCInstanceStatus.STARTED,
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_STARTED,
role=PrivateComputationRole.PARTNER,
instances=[mpc_instance_started],
)
self.private_computation_service.instance_repository.read = MagicMock(
return_value=private_computation_instance
)
# prepare the mpc instance that's returned from mpc_service.stop_instance()
mpc_instance_canceled = PCSMPCInstance.create_instance(
instance_id=test_mpc_id,
game_name=test_game_name,
mpc_party=test_mpc_party,
num_workers=self.test_num_containers,
status=MPCInstanceStatus.CANCELED,
)
self.private_computation_service.mpc_svc.stop_instance = MagicMock(
return_value=mpc_instance_canceled
)
self.private_computation_service.mpc_svc.instance_repository.read = MagicMock(
return_value=mpc_instance_canceled
)
# call cancel, expect no exception
private_computation_instance = (
self.private_computation_service.cancel_current_stage(
instance_id=self.test_private_computation_id,
)
)
# assert the pl instance returned has the correct status
self.assertEqual(
PrivateComputationInstanceStatus.COMPUTATION_FAILED,
private_computation_instance.status,
)
def test_gen_game_args_to_retry(self):
test_input = "test_input_retry"
mpc_instance = PCSMPCInstance.create_instance(
instance_id="mpc_instance",
game_name=GameNames.LIFT.value,
mpc_party=MPCParty.SERVER,
num_workers=2,
status=MPCInstanceStatus.FAILED,
containers=[
ContainerInstance(
instance_id="container_instance_0",
status=ContainerInstanceStatus.FAILED,
),
ContainerInstance(
instance_id="container_instance_1",
status=ContainerInstanceStatus.COMPLETED,
),
],
game_args=[
{
"input_filenames": test_input,
},
{
"input_filenames": "input_filenames",
},
],
)
private_computation_instance = self.create_sample_instance(
status=PrivateComputationInstanceStatus.COMPUTATION_FAILED,
instances=[mpc_instance],
)
game_args = gen_mpc_game_args_to_retry(
private_computation_instance
)
self.assertEqual(1, len(game_args)) # only 1 failed container
self.assertEqual(test_input, game_args[0]["input_filenames"])
def create_sample_instance(
self,
status: PrivateComputationInstanceStatus,
role: PrivateComputationRole = PrivateComputationRole.PUBLISHER,
instances: Optional[List[UnionedPCInstance]] = None,
) -> PrivateComputationInstance:
return PrivateComputationInstance(
instance_id=self.test_private_computation_id,
role=role,
instances=instances or [],
status=status,
status_update_ts=1600000000,
num_pid_containers=self.test_num_containers,
num_mpc_containers=self.test_num_containers,
concurrency=self.test_concurrency,
num_files_per_mpc_container=NUM_NEW_SHARDS_PER_FILE,
game_type=PrivateComputationGameType.LIFT,
input_path=self.test_input_path,
output_dir=self.test_output_dir,
fail_fast=True,
k_anonymity_threshold=DEFAULT_K_ANONYMITY_THRESHOLD,
)
| 1.648438 | 2 |
app.py | Eubule/Store-Manager-With-Datastructure | 0 | 7165 | from app import app
from app.database.db import Database
if __name__ == "__main__":
db = Database()
db.create_tables()
db.create_admin()
app.run(debug=True) | 1.6875 | 2 |
src/main.py | ryuichi1208/scraping-py | 2 | 7166 | # -*- coding: utf-8 -*-
# flake8: noqa
from flask import Flask
from flask_themes2 import Themes
import config
from util.auth import is_admin
from util.converter import RegexConverter
from util.csrf import generate_csrf_token
app = Flask(__name__.split('.')[0])
app.secret_key = config.SECRET_KEY
app.url_map.converters['regex'] = RegexConverter
app.jinja_env.globals['config'] = config
app.jinja_env.globals['csrf_token'] = generate_csrf_token
app.jinja_env.globals['is_admin'] = is_admin
Themes(app, app_identifier='yelplove')
# if debug property is present, let's use it
try:
app.debug = config.DEBUG
except AttributeError:
app.debug = False
import views
| 1.859375 | 2 |
HoursSelect.py | Maxahoy/ClassVolumeSilencer | 0 | 7167 | <gh_stars>0
"""
This is how I'm gonna schedule hours
IDEA: import the format example file that I'm using and is saved in the same directory
"""
import csv
import pprint
from tkinter import *
from tkinter.filedialog import askopenfilename
import StringProcessing
def selectHoursFile():
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
print(filename)
return filename
"""
Receives a file location, opens the csv
The format looks like this:
CLASS STARTS,Class name (optional),MON,TUES,WED,THURS,FRI,,CLASS ENDS,MON,TUES,WED,THURS,FRI
1, Stats, 10:20:00 AM,,10:20:00 AM,,10:20:00 AM,,,11:15:00 AM,,11:15:00 AM,,11:15:00 AM
2,,,09:35:00 AM,,09:35:00 AM,,,,,10:55:00 AM,,10:55:00 AM,
3,,,11:30:00 AM,11:30:00 AM,11:30:00 AM,11:30:00 AM,,,,12:25:00 PM,12:25:00 PM,12:25:00 PM,12:25:00 PM
4,,,,,,09:10:00 AM,,,,,,,10:05:00 AM
5,,12:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,,,,04:30:00 PM,04:30:00 PM,04:30:00 PM,04:30:00 PM
6,,,,,,,,,,,,,
7,,,,,,,,,,,,,
8,,,,,,,,,,,,,
9,,,,,,,,,,,,,
10,,,,,,,,,,,,,
11,,,,,,,,,,,,,
12,,,,,,,,,,,,,
13,,,,,,,,,,,,,
14,,,,,,,,,,,,,
15,,,,,,,,,,,,,
"""
def interpretCSVFormat(csvFile):
#first open the file with the filepath
classList = dict()
with open(csvFile, "r") as csvOpen:
#next populate a temporary dictionary for the classes
tempDict = dict()
classID = 0
rowReader = csv.reader(csvOpen, delimiter=',', quotechar="'")
for row in rowReader:
#dictionary format: class ID::string of class days
classTimes = row
#print(row)
tempDict[classID] = str(classTimes)
classID = classID + 1
print(StringProcessing.lineList(str(classTimes)))
del tempDict[0]
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(tempDict)
#TODO: make the sections using ClassScheduleStorage
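
# A minimal usage sketch (not part of the original module): pick a schedule CSV in the
# format documented above via the file dialog and feed it to the parser. It assumes the
# local StringProcessing helper imported above is available on the path.
if __name__ == "__main__":
    schedule_csv = selectHoursFile()
    interpretCSVFormat(schedule_csv)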
| 3.109375 | 3 |
src/bsmu/bone_age/models/dense_net/configs.py | IvanKosik/bone-age-models | 0 | 7168 | from pathlib import Path
from bsmu.bone_age.models import constants
IMAGE_DIR = Path('C:/MyDiskBackup/Projects/BoneAge/Data/SmallImages500_NoPads')
TRAIN_DATA_CSV_PATH = constants.TRAIN_DATA_CSV_PATH
VALID_DATA_CSV_PATH = constants.VALID_DATA_CSV_PATH
TEST_DATA_CSV_PATH = constants.TEST_DATA_CSV_PATH
BATCH_SIZE = 7
MODEL_NAME_PREFIX = 'DenseNet169'
MODEL_NAME_POSTFIX = 'AllImages3_MoreAugments'
| 1.765625 | 2 |
projection.py | ogawan/nisa | 0 | 7169 | <gh_stars>0
from matplotlib import pyplot as plt
def nisa_projection(years=30, annual_deposit=80, initial_budget=100):
"""
This is a function to plot deposit of TSUMITATE NISA
Parameters:
---------------
years: integer
How many years are you going to continue?
    annual_deposit: integer
Annual deposit into the NISA account.
initial_budget: integer
The initial budget.
Returns:
--------------
    None (the projection is rendered as a matplotlib scatter plot)
"""
for j in [1.00,1.01, 1.02, 1.03, 1.04, 1.05]:
original = initial_budget
ganbon = []
box = []
for i in range(0,years):
if i == 0:
box.append(original)
ganbon.append(original)
gan = ganbon[-1] + annual_deposit
original = original * j + annual_deposit
if i > 0:
box.append(original)
ganbon.append(gan)
plt.scatter(list(range(0,years)), box)
plt.legend(["0%", "1%", "2%", "3%", "4%", "5%"])
plt.xlabel("Years")
plt.ylabel("Money (Man yen)")
# Reference: https://plotly.com/python/figure-labels/
import pandas as pd
import plotly.graph_objects as go
def nisa_projection_plotly(years=30, annual_deposit=80, initial_budget=100):
"""
This is a function to plot deposit of TSUMITATE NISA
Parameters:
---------------
years: integer
How many years are you going to continue?
    annual_deposit: integer
Annual deposit into the NISA account.
initial_budget: integer
The initial budget.
Returns:
--------------
    None (the projection is rendered and shown as a Plotly figure)
"""
dic_ = {}
for j in [1.00,1.01, 1.02, 1.03, 1.04, 1.05]:
original = initial_budget
ganbon = []
box = []
for i in range(0,years):
if i == 0:
box.append(original)
ganbon.append(original)
gan = ganbon[-1] + annual_deposit
original = original * j + annual_deposit
if i > 0:
box.append(original)
ganbon.append(gan)
dic_["{} %".format(str(j)[-1])] = box
df = pd.DataFrame(dic_)
fig = go.Figure()
for i in df.columns:
fig.add_trace(go.Scatter(x=df.index, y=df[i],name=i))
fig.update_layout(
title="NISA PLOT",
xaxis_title="Years",
yaxis_title="<NAME>",
width=500,
height=400,
)
fig.show()
nisa_projection(30, 80, 100)
nisa_projection_plotly(30, 80, 100)
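
# Stand-alone sketch of the yearly update both functions above apply
# (`original = original * j + annual_deposit`), with the first-year bookkeeping
# simplified. `simple_projection` is a hypothetical helper added for illustration only.
def simple_projection(years=30, annual_deposit=80, initial_budget=100, rate=1.03):
    balance = initial_budget
    history = [balance]
    for _ in range(years - 1):
        balance = balance * rate + annual_deposit  # compound, then add this year's deposit
        history.append(balance)
    return history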
| 3.40625 | 3 |
indico/modules/oauth/models/applications.py | yamiacat/indico | 0 | 7170 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from uuid import uuid4
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.ext.declarative import declared_attr
from werkzeug.urls import url_parse
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.modules.oauth import logger
from indico.util.i18n import _
from indico.util.struct.enum import IndicoEnum
SCOPES = {'read:user': _("User information (read only)"),
'read:legacy_api': _('Legacy API (read only)'),
'write:legacy_api': _('Legacy API (write only)'),
'registrants': _('Event registrants')}
class SystemAppType(int, IndicoEnum):
none = 0
checkin = 1
flower = 2
__enforced_data__ = {
checkin: {'default_scopes': {'registrants'},
'redirect_uris': ['http://localhost'],
'is_enabled': True},
flower: {'default_scopes': {'read:user'},
'is_enabled': True}
}
__default_data__ = {
checkin: {'is_trusted': True,
'name': 'Checkin App',
'description': 'The checkin app for mobile devices allows scanning ticket QR codes and '
'checking-in event participants.'},
flower: {'is_trusted': True,
'name': 'Flower',
'description': 'Flower allows monitoring Celery tasks. If flower is installed, this app is used to '
'restrict access to Indico administrators.'}
}
@property
def enforced_data(self):
return self.__enforced_data__.get(self, {})
@property
def default_data(self):
return dict(self.__default_data__.get(self, {}), **self.enforced_data)
class OAuthApplication(db.Model):
"""OAuth applications registered in Indico."""
__tablename__ = 'applications'
@declared_attr
def __table_args__(cls):
return (db.Index('ix_uq_applications_name_lower', db.func.lower(cls.name), unique=True),
db.Index(None, cls.system_app_type, unique=True,
postgresql_where=db.text(f'system_app_type != {SystemAppType.none.value}')),
{'schema': 'oauth'})
#: the unique id of the application
id = db.Column(
db.Integer,
primary_key=True
)
#: human readable name
name = db.Column(
db.String,
nullable=False
)
#: human readable description
description = db.Column(
db.Text,
nullable=False,
default=''
)
#: the OAuth client_id
client_id = db.Column(
UUID,
unique=True,
nullable=False,
default=lambda: str(uuid4())
)
#: the OAuth client_secret
client_secret = db.Column(
UUID,
nullable=False,
default=lambda: str(uuid4())
)
#: the OAuth default scopes the application may request access to
default_scopes = db.Column(
ARRAY(db.String),
nullable=False
)
#: the OAuth absolute URIs that a application may use to redirect to after authorization
redirect_uris = db.Column(
ARRAY(db.String),
nullable=False,
default=[]
)
#: whether the application is enabled or disabled
is_enabled = db.Column(
db.Boolean,
nullable=False,
default=True
)
#: whether the application can access user data without asking for permission
is_trusted = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: the type of system app (if any). system apps cannot be deleted
system_app_type = db.Column(
PyIntEnum(SystemAppType),
nullable=False,
default=SystemAppType.none
)
# relationship backrefs:
# - tokens (OAuthToken.application)
@property
def client_type(self):
return 'public'
@property
def default_redirect_uri(self):
return self.redirect_uris[0] if self.redirect_uris else None
@property
def locator(self):
return {'id': self.id}
def __repr__(self): # pragma: no cover
return f'<OAuthApplication({self.id}, {self.name}, {self.client_id})>'
def reset_client_secret(self):
self.client_secret = str(uuid4())
logger.info("Client secret for %s has been reset.", self)
def validate_redirect_uri(self, redirect_uri):
"""Called by flask-oauthlib to validate the redirect_uri.
Uses a logic similar to the one at GitHub, i.e. protocol and
host/port must match exactly and if there is a path in the
whitelisted URL, the path of the redirect_uri must start with
that path.
"""
uri_data = url_parse(redirect_uri)
for valid_uri_data in map(url_parse, self.redirect_uris):
if (uri_data.scheme == valid_uri_data.scheme and uri_data.netloc == valid_uri_data.netloc and
uri_data.path.startswith(valid_uri_data.path)):
return True
return False
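
# Stand-alone sketch of the matching rule implemented by validate_redirect_uri above,
# kept here for illustration only (the helper name and example URIs are hypothetical and
# not part of Indico's API):
def _redirect_uri_matches_example(redirect_uri, whitelisted):
    got, allowed = url_parse(redirect_uri), url_parse(whitelisted)
    return (got.scheme == allowed.scheme and got.netloc == allowed.netloc and
            got.path.startswith(allowed.path))

# e.g. ('https://app.example.com/cb/extra', 'https://app.example.com/cb') -> True
#      ('http://app.example.com/cb',        'https://app.example.com/cb') -> False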
| 1.976563 | 2 |
PaddleNLP/unarchived/deep_attention_matching_net/utils/layers.py | FrancisLiang/models-1 | 3 | 7171 | <reponame>FrancisLiang/models-1
import paddle.fluid as fluid
def loss(x, y, clip_value=10.0):
"""Calculate the sigmoid cross entropy with logits for input(x).
Args:
        x: Variable with shape [batch, dim]
y: Input label
Returns:
loss: cross entropy
logits: prediction
"""
logits = fluid.layers.fc(
input=x,
size=1,
bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(0.)))
loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, label=y)
loss = fluid.layers.reduce_mean(
fluid.layers.clip(
loss, min=-clip_value, max=clip_value))
return loss, logits
def ffn(input, d_inner_hid, d_hid, name=None):
"""Position-wise Feed-Forward Network
"""
hidden = fluid.layers.fc(input=input,
size=d_inner_hid,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(name=name + '_fc.w_0'),
bias_attr=fluid.ParamAttr(
name=name + '_fc.b_0',
initializer=fluid.initializer.Constant(0.)),
act="relu")
out = fluid.layers.fc(input=hidden,
size=d_hid,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(name=name + '_fc.w_1'),
bias_attr=fluid.ParamAttr(
name=name + '_fc.b_1',
initializer=fluid.initializer.Constant(0.)))
return out
def dot_product_attention(query,
key,
value,
d_key,
q_mask=None,
k_mask=None,
dropout_rate=None,
mask_cache=None):
"""Dot product layer.
Args:
query: a tensor with shape [batch, Q_time, Q_dimension]
key: a tensor with shape [batch, time, K_dimension]
value: a tensor with shape [batch, time, V_dimension]
        q_mask: a mask tensor for the query positions
        k_mask: a mask tensor for the key positions
Returns:
a tensor with shape [batch, query_time, value_dimension]
Raises:
AssertionError: if Q_dimension not equal to K_dimension when attention
type is dot.
"""
logits = fluid.layers.matmul(
x=query, y=key, transpose_y=True, alpha=d_key**(-0.5))
if (q_mask is not None) and (k_mask is not None):
if mask_cache is not None and q_mask.name in mask_cache and k_mask.name in mask_cache[
q_mask.name]:
mask, another_mask = mask_cache[q_mask.name][k_mask.name]
else:
mask = fluid.layers.matmul(x=q_mask, y=k_mask, transpose_y=True)
another_mask = fluid.layers.scale(
mask,
scale=float(2**32 - 1),
bias=float(-1),
bias_after_scale=False)
if mask_cache is not None:
if q_mask.name not in mask_cache:
mask_cache[q_mask.name] = dict()
mask_cache[q_mask.name][k_mask.name] = [mask, another_mask]
logits = mask * logits + another_mask
attention = fluid.layers.softmax(logits)
if dropout_rate:
attention = fluid.layers.dropout(
input=attention, dropout_prob=dropout_rate, is_test=False, seed=2)
atten_out = fluid.layers.matmul(x=attention, y=value)
return atten_out
def block(name,
query,
key,
value,
d_key,
q_mask=None,
k_mask=None,
is_layer_norm=True,
dropout_rate=None,
mask_cache=None):
"""
"""
att_out = dot_product_attention(
query,
key,
value,
d_key,
q_mask,
k_mask,
dropout_rate,
mask_cache=mask_cache)
y = query + att_out
if is_layer_norm:
y = fluid.layers.layer_norm(
input=y,
begin_norm_axis=len(y.shape) - 1,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.),
name=name + '_layer_norm.w_0'),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.),
name=name + '_layer_norm.b_0'))
z = ffn(y, d_key, d_key, name)
w = y + z
if is_layer_norm:
w = fluid.layers.layer_norm(
input=w,
begin_norm_axis=len(w.shape) - 1,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(1.),
name=name + '_layer_norm.w_1'),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.),
name=name + '_layer_norm.b_1'))
return w
def cnn_3d(input, out_channels_0, out_channels_1, add_relu=True):
# same padding
conv_0 = fluid.layers.conv3d(
name="conv3d_0",
input=input,
num_filters=out_channels_0,
filter_size=[3, 3, 3],
padding=[1, 1, 1],
act="elu" if add_relu else None,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-0.01, high=0.01)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.0)))
# same padding
pooling_0 = fluid.layers.pool3d(
input=conv_0,
pool_type="max",
pool_size=3,
pool_padding=1,
pool_stride=3)
conv_1 = fluid.layers.conv3d(
name="conv3d_1",
input=pooling_0,
num_filters=out_channels_1,
filter_size=[3, 3, 3],
padding=[1, 1, 1],
act="elu" if add_relu else None,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-0.01, high=0.01)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.0)))
# same padding
pooling_1 = fluid.layers.pool3d(
input=conv_1,
pool_type="max",
pool_size=3,
pool_padding=1,
pool_stride=3)
return pooling_1
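
# Reference sketch of the scaled dot-product attention described in the docstring of
# dot_product_attention above, written with NumPy for clarity (illustration only; it is
# not used by the Paddle graph, and masking/dropout are omitted):
def np_dot_product_attention(query, key, value, d_key):
    import numpy as np  # local import; this sketch is independent of paddle.fluid
    logits = np.matmul(query, np.transpose(key, (0, 2, 1))) * d_key ** (-0.5)
    weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)  # numerically stable softmax over keys
    return np.matmul(weights, value)  # [batch, Q_time, V_dimension]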
| 2.703125 | 3 |
plugin.video.saltsrd.lite/js2py/translators/jsregexps.py | TheWardoctor/wardoctors-repo | 1 | 7172 | <filename>plugin.video.saltsrd.lite/js2py/translators/jsregexps.py
from salts_lib.pyjsparser.pyjsparserdata import *
REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.'}
NOT_PATTERN_CHARS = {'^', '$', '\\', '.', '*', '+', '?', '(', ')', '[', ']', '|'} # what about '{', '}', ???
CHAR_CLASS_ESCAPE = {'d', 'D', 's', 'S', 'w', 'W'}
CONTROL_ESCAPE_CHARS = {'f', 'n', 'r', 't', 'v'}
CONTROL_LETTERS = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'}
def SpecialChar(char):
return {'type': 'SpecialChar',
'content': char}
def isPatternCharacter(char):
return char not in NOT_PATTERN_CHARS
class JsRegExpParser:
def __init__(self, source, flags):
self.source = source
self.flags = flags
self.index = 0
self.length = len(source)
self.lineNumber = 0
self.lineStart = 0
def parsePattern(self):
        '''Perform string escape - for regexp literals'''
return {'type': 'Pattern',
'contents': self.parseDisjunction()}
def parseDisjunction(self):
alternatives = []
while True:
alternatives.append(self.parseAlternative())
if not self.isEOF():
self.expect_character('|')
else:
break
return {'type': 'Disjunction',
'contents': alternatives}
def isEOF(self):
if self.index>=self.length:
return True
return False
def expect_character(self, character):
if self.source[self.index]!=character:
self.throwUnexpected(character)
self.index += 1
def parseAlternative(self):
contents = []
while not self.isEOF() and self.source[self.index]!='|':
contents.append(self.parseTerm())
return {'type': 'Alternative',
'contents': contents}
def follows(self, chars):
for i, c in enumerate(chars):
if self.index+i>=self.length or self.source[self.index+i] != c:
return False
return True
def parseTerm(self):
assertion = self.parseAssertion()
if assertion:
return assertion
else:
return {'type': 'Term',
'contents': self.parseAtom()} # quantifier will go inside atom!
def parseAssertion(self):
if self.follows('$'):
content = SpecialChar('$')
self.index += 1
elif self.follows('^'):
content = SpecialChar('^')
self.index += 1
elif self.follows('\\b'):
content = SpecialChar('\\b')
self.index += 2
elif self.follows('\\B'):
content = SpecialChar('\\B')
self.index += 2
elif self.follows('(?='):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = {'type': 'Lookahead',
'contents': dis,
'negated': False}
elif self.follows('(?!'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = {'type': 'Lookahead',
'contents': dis,
'negated': True}
else:
return None
return {'type': 'Assertion',
'content': content}
def parseAtom(self):
if self.follows('.'):
content = SpecialChar('.')
self.index += 1
elif self.follows('\\'):
self.index += 1
content = self.parseAtomEscape()
elif self.follows('['):
content = self.parseCharacterClass()
elif self.follows('(?:'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
content = 'idk'
elif self.follows('('):
self.index += 1
dis = self.parseDisjunction()
self.expect_character(')')
content = 'idk'
elif isPatternCharacter(self.source[self.index]):
content = self.source[self.index]
self.index += 1
else:
return None
quantifier = self.parseQuantifier()
return {'type': 'Atom',
'content': content,
'quantifier': quantifier}
def parseQuantifier(self):
prefix = self.parseQuantifierPrefix()
if not prefix:
return None
greedy = True
if self.follows('?'):
self.index += 1
greedy = False
return {'type': 'Quantifier',
'contents': prefix,
'greedy': greedy}
def parseQuantifierPrefix(self):
if self.isEOF():
return None
if self.follows('+'):
content = '+'
self.index += 1
elif self.follows('?'):
content = '?'
self.index += 1
elif self.follows('*'):
content = '*'
self.index += 1
elif self.follows('{'): # try matching otherwise return None and restore the state
i = self.index
self.index += 1
digs1 = self.scanDecimalDigs()
# if no minimal number of digs provided then return no quantifier
if not digs1:
self.index = i
return None
# scan char limit if provided
if self.follows(','):
self.index += 1
digs2 = self.scanDecimalDigs()
else:
digs2 = ''
# must be valid!
if not self.follows('}'):
self.index = i
return None
else:
self.expect_character('}')
content = int(digs1), int(digs2) if digs2 else None
else:
return None
return content
def parseAtomEscape(self):
ch = self.source[self.index]
if isDecimalDigit(ch) and ch!=0:
digs = self.scanDecimalDigs()
elif ch in CHAR_CLASS_ESCAPE:
self.index += 1
return SpecialChar('\\' + ch)
else:
return self.parseCharacterEscape()
def parseCharacterEscape(self):
ch = self.source[self.index]
if ch in CONTROL_ESCAPE_CHARS:
return SpecialChar('\\' + ch)
if ch=='c':
            pass  # TODO: control-letter escapes (\cX) are not implemented yet
def scanDecimalDigs(self):
s = self.index
while not self.isEOF() and isDecimalDigit(self.source[self.index]):
self.index += 1
return self.source[s:self.index]
a = JsRegExpParser('a(?=x)', '')
print(a.parsePattern()) | 2.375 | 2 |
connectomics/model/block/squeeze_excitation.py | yixinliao/pytorch_connectomics | 1 | 7173 | import torch.nn as nn
from .basic import *
class squeeze_excitation_2d(nn.Module):
"""Squeeze-and-Excitation Block 2D
Args:
channel (int): number of input channels.
channel_reduction (int): channel squeezing factor.
spatial_reduction (int): pooling factor for x,y axes.
"""
def __init__(self, channel, channel_reduction=4, spatial_reduction=4, norm_mode='bn', act_mode='elu'):
super(squeeze_excitation_2d, self).__init__()
self.pool_size = (spatial_reduction, spatial_reduction)
layers = [nn.AvgPool2d(kernel_size=self.pool_size, stride=self.pool_size)]
layers += conv2d_norm_act(channel, channel // channel_reduction, kernel_size=1, padding=0, norm_mode=norm_mode, act_mode=act_mode, return_list=True)
layers += conv2d_norm_act(channel // channel_reduction, channel, kernel_size=1, padding=0, norm_mode=norm_mode, return_list=True)
layers = [nn.Sigmoid(),
nn.Upsample(scale_factor=self.pool_size, mode='trilinear', align_corners=False)]
self.se = nn.Sequential(*layers)
def forward(self, x):
y = self.se(x)
z = x + y*x
return z
class squeeze_excitation_3d(nn.Module):
"""Squeeze-and-Excitation Block 3D
Args:
channel (int): number of input channels.
channel_reduction (int): channel squeezing factor.
spatial_reduction (int): pooling factor for x,y axes.
z_reduction (int): pooling factor for z axis.
"""
def __init__(self, channel, channel_reduction=4, spatial_reduction=4, z_reduction=1, norm_mode='bn', act_mode='elu'):
super(squeeze_excitation_3d, self).__init__()
self.pool_size = (z_reduction, spatial_reduction, spatial_reduction)
layers = [nn.AvgPool3d(kernel_size=self.pool_size, stride=self.pool_size)]
layers += conv3d_norm_act(channel, channel//channel_reduction, kernel_size=1, padding=0, norm_mode=norm_mode, act_mode=act_mode, return_list=True)
layers += conv3d_norm_act(channel//channel_reduction, channel, kernel_size=1, padding=0, norm_mode=norm_mode, return_list=True)
layers += [nn.Sigmoid(),
nn.Upsample(scale_factor=self.pool_size, mode='trilinear', align_corners=False)]
self.se = nn.Sequential(*layers)
def forward(self, x):
y = self.se(x)
z = x + y*x
return z
| 2.71875 | 3 |
duckdown/handlers/site_handler.py | blueshed/duckdown | 0 | 7174 | <reponame>blueshed/duckdown
# pylint: disable=W0201, E1101
""" handle request for markdown pages """
import logging
import os
import importlib
from tornado.web import RequestHandler, HTTPError
from tornado.escape import url_escape
from ..utils.converter_mixin import ConverterMixin
from .access_control import UserMixin
from ..utils.nav import nav
LOGGER = logging.getLogger(__name__)
EMPTY_TOC = '<div class="toc">\n<ul></ul>\n</div>\n'
class SiteHandler(
UserMixin, ConverterMixin, RequestHandler
): # pylint: disable=W0223
""" inline transform request for markdown pages """
def initialize(self, pages):
""" setup init properties """
self.pages = pages
self.meta = None
self.nav = None
self.site_nav = None
self.site = None
def create_template_loader(self, template_path):
""" if we have one, us it """
if self.site.template_loader:
return self.site.template_loader
return super().create_template_loader(template_path)
@property
def has_toc(self):
""" determin if toc is empty """
return self.meta.toc != EMPTY_TOC
def meta_value(self, name, default=None):
""" return markdown meta value """
return self.meta.Meta.get(name, [default])
def one_meta_value(self, name, default=None):
""" return markdown meta value """
result = self.meta_value(name, default)
return result[0] if result else None
def load_site_nav(self, site, path):
""" set the handler site_nav attribute """
menu = nav(site, root=self.pages, path=path)
if menu:
self.site_nav = "\n".join(menu)
def load_dir_nav(self, site, path):
""" load nav section if it exist """
folder = os.path.dirname(path)
if folder:
LOGGER.info(" -- folder: %s", folder)
nav_path = os.path.join(folder, "-nav.md")
_, content = site.get_file(nav_path)
if content:
content = content.decode("utf-8")
LOGGER.info(" -- nav: %s", nav_path)
content = self.meta.convert(content)
self.nav = self.convert_images(content)
def run_script(
self, site, script_name, path
): # pylint: disable=unused-argument
""" load a module and call module.main """
name = f"{self.settings['script_path']}.{script_name}"
script_module = importlib.import_module(name)
return script_module.main(path)
async def get(self, path):
""" handle get """
path = path if path else "index.html"
file, ext = os.path.splitext(path)
doc = os.path.join(self.pages, f"{file}.md")
self.site = self.get_site(path)
_, content = self.site.get_file(doc)
if content is None:
raise HTTPError(404)
if content:
content = content.decode("utf-8")
self.meta = self.markdown
self.load_dir_nav(self.site, doc)
self.load_site_nav(self.site, path)
file_path = os.path.split(file)[0]
# load theme
theme_file = os.path.join(self.pages, file_path, "-theme.css")
_, theme_css = self.site.get_file(theme_file)
if theme_css:
LOGGER.info(" -- theme.css")
theme_css = theme_css.decode("utf-8")
edit_path = "/edit"
if file:
edit_path = f"/edit?path={ url_escape(file) }.md"
LOGGER.info(" -- ext: %s", ext)
if ext == ".html":
content = self.meta.convert(content)
LOGGER.info(" -- meta: %s", self.meta.Meta)
template = self.one_meta_value("template", "site")
LOGGER.info(" -- tmpl: %s", template)
for key in self.meta.Meta:
if key.startswith("x-script-"):
outcome = self.run_script(
self.site, self.meta.Meta[key][0], path
)
self.meta.Meta[key] = [outcome]
self.render(
f"{template}_tmpl.html",
content=self.convert_images(content),
edit_path=edit_path,
theme_css=theme_css,
)
else:
self.write(self.convert_images(content))
| 2.328125 | 2 |
Problemset/binary-search-tree-to-greater-sum-tree/binary-search-tree-to-greater-sum-tree.py | KivenCkl/LeetCode | 7 | 7175 |
# @Title: 从二叉搜索树到更大和树 (Binary Search Tree to Greater Sum Tree)
# @Author: KivenC
# @Date: 2019-05-15 19:52:08
# @Runtime: 48 ms
# @Memory: 13 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def __init__(self):
self.sum_value = 0
def bstToGst(self, root: TreeNode) -> TreeNode:
if not root:
return
self.bstToGst(root.right)
root.val = self.sum_value = self.sum_value + root.val
self.bstToGst(root.left)
return root
| 3.390625 | 3 |
vine/clone.py | robinson96/GRAPE | 4 | 7176 | import os
import option
import utility
import grapeMenu
import grapeGit as git
import grapeConfig
class Clone(option.Option):
""" grape-clone
Clones a git repo and configures it for use with git.
Usage: grape-clone <url> <path> [--recursive] [--allNested]
Arguments:
<url> The URL of the remote repository
<path> The directory where you want to clone the repo to.
Options:
--recursive Recursively clone submodules.
--allNested Get all nested subprojects.
"""
def __init__(self):
super(Clone, self).__init__()
self._key = "clone"
self._section = "Getting Started"
#Clones the default repo into a new local repo
def description(self):
return "Clone a repo and configure it for grape"
def execute(self, args):
remotepath = args["<url>"]
destpath = args["<path>"]
rstr = "--recursive" if args["--recursive"] else ""
utility.printMsg("Cloning %s into %s %s" % (remotepath, destpath, "recursively" if args["--recursive"] else ""))
git.clone(" %s %s %s" % (rstr, remotepath, destpath))
utility.printMsg("Clone succeeded!")
os.chdir(destpath)
grapeConfig.read()
# ensure you start on a reasonable publish branch
menu = grapeMenu.menu()
config = grapeConfig.grapeConfig()
publicBranches = config.getPublicBranchList()
if publicBranches:
if "develop" in publicBranches:
initialBranch = "develop"
elif "master" in publicBranches:
initialBranch = "master"
else:
initialBranch = publicBranches[0]
menu.applyMenuChoice("checkout", args=[initialBranch])
if args["--allNested"]:
configArgs = ["--uv","--uvArg=--allNestedSubprojects"]
else:
configArgs = []
return menu.applyMenuChoice("config", configArgs)
def setDefaultConfig(self, config):
pass
| 3 | 3 |
neo/test/iotest/test_nixio.py | pearsonlab/python-neo | 0 | 7177 | # -*- coding: utf-8 -*-
# Copyright (c) 2016, German Neuroinformatics Node (G-Node)
# <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the BSD License. See
# LICENSE file in the root of the Project.
"""
Tests for neo.io.nixio
"""
import os
from datetime import datetime
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
import string
import itertools
from six import string_types
import numpy as np
import quantities as pq
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal,
IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch)
from neo.test.iotest.common_io_test import BaseTestIO
try:
import nixio
HAVE_NIX = True
except ImportError:
HAVE_NIX = False
from neo.io.nixio import NixIO
from neo.io.nixio import nixtypes
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class NixIOTest(unittest.TestCase):
filename = None
io = None
def compare_blocks(self, neoblocks, nixblocks):
for neoblock, nixblock in zip(neoblocks, nixblocks):
self.compare_attr(neoblock, nixblock)
self.assertEqual(len(neoblock.segments), len(nixblock.groups))
for idx, neoseg in enumerate(neoblock.segments):
nixgrp = nixblock.groups[neoseg.name]
self.compare_segment_group(neoseg, nixgrp)
for idx, neochx in enumerate(neoblock.channel_indexes):
if neochx.name:
nixsrc = nixblock.sources[neochx.name]
else:
nixsrc = nixblock.sources[idx]
self.compare_chx_source(neochx, nixsrc)
self.check_refs(neoblock, nixblock)
def compare_chx_source(self, neochx, nixsrc):
self.compare_attr(neochx, nixsrc)
nix_channels = list(src for src in nixsrc.sources
if src.type == "neo.channelindex")
self.assertEqual(len(neochx.index), len(nix_channels))
for nixchan in nix_channels:
nixchanidx = nixchan.metadata["index"]
try:
neochanpos = list(neochx.index).index(nixchanidx)
except ValueError:
self.fail("Channel indexes do not match.")
if len(neochx.channel_names):
neochanname = neochx.channel_names[neochanpos]
if ((not isinstance(neochanname, str)) and
isinstance(neochanname, bytes)):
neochanname = neochanname.decode()
nixchanname = nixchan.name
self.assertEqual(neochanname, nixchanname)
nix_units = list(src for src in nixsrc.sources
if src.type == "neo.unit")
self.assertEqual(len(neochx.units), len(nix_units))
for neounit in neochx.units:
nixunit = nixsrc.sources[neounit.name]
self.compare_attr(neounit, nixunit)
def check_refs(self, neoblock, nixblock):
"""
Checks whether the references between objects that are not nested are
mapped correctly (e.g., SpikeTrains referenced by a Unit).
:param neoblock: A Neo block
:param nixblock: The corresponding NIX block
"""
for idx, neochx in enumerate(neoblock.channel_indexes):
if neochx.name:
nixchx = nixblock.sources[neochx.name]
else:
nixchx = nixblock.sources[idx]
# AnalogSignals referencing CHX
neoasigs = list(sig.name for sig in neochx.analogsignals)
nixasigs = list(set(da.metadata.name for da in nixblock.data_arrays
if da.type == "neo.analogsignal" and
nixchx in da.sources))
self.assertEqual(len(neoasigs), len(nixasigs))
# IrregularlySampledSignals referencing CHX
neoisigs = list(sig.name for sig in neochx.irregularlysampledsignals)
nixisigs = list(set(da.metadata.name for da in nixblock.data_arrays
if da.type == "neo.irregularlysampledsignal" and
nixchx in da.sources))
self.assertEqual(len(neoisigs), len(nixisigs))
# SpikeTrains referencing CHX and Units
for sidx, neounit in enumerate(neochx.units):
if neounit.name:
nixunit = nixchx.sources[neounit.name]
else:
nixunit = nixchx.sources[sidx]
neosts = list(st.name for st in neounit.spiketrains)
nixsts = list(mt for mt in nixblock.multi_tags
if mt.type == "neo.spiketrain" and
nixunit.name in mt.sources)
# SpikeTrains must also reference CHX
for nixst in nixsts:
self.assertIn(nixchx.name, nixst.sources)
nixsts = list(st.name for st in nixsts)
self.assertEqual(len(neosts), len(nixsts))
for neoname in neosts:
if neoname:
self.assertIn(neoname, nixsts)
# Events and Epochs must reference all Signals in the Group (NIX only)
for nixgroup in nixblock.groups:
nixevep = list(mt for mt in nixgroup.multi_tags
if mt.type in ["neo.event", "neo.epoch"])
nixsigs = list(da.name for da in nixgroup.data_arrays
if da.type in ["neo.analogsignal",
"neo.irregularlysampledsignal"])
for nee in nixevep:
for ns in nixsigs:
self.assertIn(ns, nee.references)
def compare_segment_group(self, neoseg, nixgroup):
self.compare_attr(neoseg, nixgroup)
neo_signals = neoseg.analogsignals + neoseg.irregularlysampledsignals
self.compare_signals_das(neo_signals, nixgroup.data_arrays)
neo_eests = neoseg.epochs + neoseg.events + neoseg.spiketrains
self.compare_eests_mtags(neo_eests, nixgroup.multi_tags)
def compare_signals_das(self, neosignals, data_arrays):
for sig in neosignals:
if self.io._find_lazy_loaded(sig) is not None:
sig = self.io.load_lazy_object(sig)
dalist = list()
for idx in itertools.count():
nixname = "{}.{}".format(sig.name, idx)
if nixname in data_arrays:
dalist.append(data_arrays[nixname])
else:
break
_, nsig = np.shape(sig)
self.assertEqual(nsig, len(dalist))
self.compare_signal_dalist(sig, dalist)
def compare_signal_dalist(self, neosig, nixdalist):
"""
Check if a Neo Analog or IrregularlySampledSignal matches a list of
NIX DataArrays.
:param neosig: Neo Analog or IrregularlySampledSignal
:param nixdalist: List of DataArrays
"""
nixmd = nixdalist[0].metadata
self.assertTrue(all(nixmd == da.metadata for da in nixdalist))
neounit = str(neosig.dimensionality)
for sig, da in zip(np.transpose(neosig),
sorted(nixdalist, key=lambda d: d.name)):
self.compare_attr(neosig, da)
np.testing.assert_almost_equal(sig.magnitude, da)
self.assertEqual(neounit, da.unit)
timedim = da.dimensions[0]
if isinstance(neosig, AnalogSignal):
self.assertIsInstance(timedim, nixtypes["SampledDimension"])
self.assertEqual(
pq.Quantity(timedim.sampling_interval, timedim.unit),
neosig.sampling_period
)
self.assertEqual(timedim.offset, neosig.t_start.magnitude)
if "t_start.units" in da.metadata.props:
self.assertEqual(da.metadata["t_start.units"],
str(neosig.t_start.dimensionality))
elif isinstance(neosig, IrregularlySampledSignal):
self.assertIsInstance(timedim, nixtypes["RangeDimension"])
np.testing.assert_almost_equal(neosig.times.magnitude,
timedim.ticks)
self.assertEqual(timedim.unit,
str(neosig.times.dimensionality))
def compare_eests_mtags(self, eestlist, mtaglist):
self.assertEqual(len(eestlist), len(mtaglist))
for eest in eestlist:
if self.io._find_lazy_loaded(eest) is not None:
eest = self.io.load_lazy_object(eest)
mtag = mtaglist[eest.name]
if isinstance(eest, Epoch):
self.compare_epoch_mtag(eest, mtag)
elif isinstance(eest, Event):
self.compare_event_mtag(eest, mtag)
elif isinstance(eest, SpikeTrain):
self.compare_spiketrain_mtag(eest, mtag)
def compare_epoch_mtag(self, epoch, mtag):
self.assertEqual(mtag.type, "neo.epoch")
self.compare_attr(epoch, mtag)
np.testing.assert_almost_equal(epoch.times.magnitude, mtag.positions)
np.testing.assert_almost_equal(epoch.durations.magnitude, mtag.extents)
self.assertEqual(mtag.positions.unit,
str(epoch.times.units.dimensionality))
self.assertEqual(mtag.extents.unit,
str(epoch.durations.units.dimensionality))
for neol, nixl in zip(epoch.labels,
mtag.positions.dimensions[0].labels):
# Dirty. Should find the root cause instead
if isinstance(neol, bytes):
neol = neol.decode()
if isinstance(nixl, bytes):
nixl = nixl.decode()
self.assertEqual(neol, nixl)
def compare_event_mtag(self, event, mtag):
self.assertEqual(mtag.type, "neo.event")
self.compare_attr(event, mtag)
np.testing.assert_almost_equal(event.times.magnitude, mtag.positions)
self.assertEqual(mtag.positions.unit, str(event.units.dimensionality))
for neol, nixl in zip(event.labels,
mtag.positions.dimensions[0].labels):
# Dirty. Should find the root cause instead
# Only happens in 3.2
if isinstance(neol, bytes):
neol = neol.decode()
if isinstance(nixl, bytes):
nixl = nixl.decode()
self.assertEqual(neol, nixl)
def compare_spiketrain_mtag(self, spiketrain, mtag):
self.assertEqual(mtag.type, "neo.spiketrain")
self.compare_attr(spiketrain, mtag)
np.testing.assert_almost_equal(spiketrain.times.magnitude,
mtag.positions)
if len(mtag.features):
neowf = spiketrain.waveforms
nixwf = mtag.features[0].data
self.assertEqual(np.shape(neowf), np.shape(nixwf))
self.assertEqual(nixwf.unit, str(neowf.units.dimensionality))
np.testing.assert_almost_equal(neowf.magnitude, nixwf)
self.assertIsInstance(nixwf.dimensions[0], nixtypes["SetDimension"])
self.assertIsInstance(nixwf.dimensions[1], nixtypes["SetDimension"])
self.assertIsInstance(nixwf.dimensions[2],
nixtypes["SampledDimension"])
def compare_attr(self, neoobj, nixobj):
if neoobj.name:
if isinstance(neoobj, (AnalogSignal, IrregularlySampledSignal)):
nix_name = ".".join(nixobj.name.split(".")[:-1])
else:
nix_name = nixobj.name
self.assertEqual(neoobj.name, nix_name)
self.assertEqual(neoobj.description, nixobj.definition)
if hasattr(neoobj, "rec_datetime") and neoobj.rec_datetime:
self.assertEqual(neoobj.rec_datetime,
datetime.fromtimestamp(nixobj.created_at))
if hasattr(neoobj, "file_datetime") and neoobj.file_datetime:
self.assertEqual(neoobj.file_datetime,
datetime.fromtimestamp(
nixobj.metadata["file_datetime"]))
if neoobj.annotations:
nixmd = nixobj.metadata
for k, v, in neoobj.annotations.items():
if isinstance(v, pq.Quantity):
self.assertEqual(nixmd.props[str(k)].unit,
str(v.dimensionality))
np.testing.assert_almost_equal(nixmd[str(k)],
v.magnitude)
else:
self.assertEqual(nixmd[str(k)], v)
@classmethod
def create_full_nix_file(cls, filename):
nixfile = nixio.File.open(filename, nixio.FileMode.Overwrite)
nix_block_a = nixfile.create_block(cls.rword(10), "neo.block")
nix_block_a.definition = cls.rsentence(5, 10)
nix_block_b = nixfile.create_block(cls.rword(10), "neo.block")
nix_block_b.definition = cls.rsentence(3, 3)
nix_block_a.metadata = nixfile.create_section(
nix_block_a.name, nix_block_a.name+".metadata"
)
nix_block_b.metadata = nixfile.create_section(
nix_block_b.name, nix_block_b.name+".metadata"
)
nix_blocks = [nix_block_a, nix_block_b]
for blk in nix_blocks:
for ind in range(3):
group = blk.create_group(cls.rword(), "neo.segment")
group.definition = cls.rsentence(10, 15)
group_md = blk.metadata.create_section(group.name,
group.name+".metadata")
group.metadata = group_md
blk = nix_blocks[0]
group = blk.groups[0]
allspiketrains = list()
allsignalgroups = list()
# analogsignals
for n in range(3):
siggroup = list()
asig_name = "{}_asig{}".format(cls.rword(10), n)
asig_definition = cls.rsentence(5, 5)
asig_md = group.metadata.create_section(asig_name,
asig_name+".metadata")
for idx in range(3):
da_asig = blk.create_data_array(
"{}.{}".format(asig_name, idx),
"neo.analogsignal",
data=cls.rquant(100, 1)
)
da_asig.definition = asig_definition
da_asig.unit = "mV"
da_asig.metadata = asig_md
timedim = da_asig.append_sampled_dimension(0.01)
timedim.unit = "ms"
timedim.label = "time"
timedim.offset = 10
da_asig.append_set_dimension()
group.data_arrays.append(da_asig)
siggroup.append(da_asig)
allsignalgroups.append(siggroup)
# irregularlysampledsignals
for n in range(2):
siggroup = list()
isig_name = "{}_isig{}".format(cls.rword(10), n)
isig_definition = cls.rsentence(12, 12)
isig_md = group.metadata.create_section(isig_name,
isig_name+".metadata")
isig_times = cls.rquant(200, 1, True)
for idx in range(10):
da_isig = blk.create_data_array(
"{}.{}".format(isig_name, idx),
"neo.irregularlysampledsignal",
data=cls.rquant(200, 1)
)
da_isig.definition = isig_definition
da_isig.unit = "mV"
da_isig.metadata = isig_md
timedim = da_isig.append_range_dimension(isig_times)
timedim.unit = "s"
timedim.label = "time"
da_isig.append_set_dimension()
group.data_arrays.append(da_isig)
siggroup.append(da_isig)
allsignalgroups.append(siggroup)
# SpikeTrains with Waveforms
for n in range(4):
stname = "{}-st{}".format(cls.rword(20), n)
times = cls.rquant(400, 1, True)
times_da = blk.create_data_array(
"{}.times".format(stname),
"neo.spiketrain.times",
data=times
)
times_da.unit = "ms"
mtag_st = blk.create_multi_tag(stname,
"neo.spiketrain",
times_da)
group.multi_tags.append(mtag_st)
mtag_st.definition = cls.rsentence(20, 30)
mtag_st_md = group.metadata.create_section(
mtag_st.name, mtag_st.name+".metadata"
)
mtag_st.metadata = mtag_st_md
mtag_st_md.create_property(
"t_stop", nixio.Value(max(times_da).item()+1)
)
waveforms = cls.rquant((10, 8, 5), 1)
wfname = "{}.waveforms".format(mtag_st.name)
wfda = blk.create_data_array(wfname, "neo.waveforms",
data=waveforms)
wfda.unit = "mV"
mtag_st.create_feature(wfda, nixio.LinkType.Indexed)
wfda.append_set_dimension() # spike dimension
wfda.append_set_dimension() # channel dimension
wftimedim = wfda.append_sampled_dimension(0.1)
wftimedim.unit = "ms"
wftimedim.label = "time"
wfda.metadata = mtag_st_md.create_section(
wfname, "neo.waveforms.metadata"
)
wfda.metadata.create_property("left_sweep",
[nixio.Value(20)]*5)
allspiketrains.append(mtag_st)
# Epochs
for n in range(3):
epname = "{}-ep{}".format(cls.rword(5), n)
times = cls.rquant(5, 1, True)
times_da = blk.create_data_array(
"{}.times".format(epname),
"neo.epoch.times",
data=times
)
times_da.unit = "s"
extents = cls.rquant(5, 1)
extents_da = blk.create_data_array(
"{}.durations".format(epname),
"neo.epoch.durations",
data=extents
)
extents_da.unit = "s"
mtag_ep = blk.create_multi_tag(
epname, "neo.epoch", times_da
)
group.multi_tags.append(mtag_ep)
mtag_ep.definition = cls.rsentence(2)
mtag_ep.extents = extents_da
label_dim = mtag_ep.positions.append_set_dimension()
label_dim.labels = cls.rsentence(5).split(" ")
# reference all signals in the group
for siggroup in allsignalgroups:
mtag_ep.references.extend(siggroup)
# Events
for n in range(2):
evname = "{}-ev{}".format(cls.rword(5), n)
times = cls.rquant(5, 1, True)
times_da = blk.create_data_array(
"{}.times".format(evname),
"neo.event.times",
data=times
)
times_da.unit = "s"
mtag_ev = blk.create_multi_tag(
evname, "neo.event", times_da
)
group.multi_tags.append(mtag_ev)
mtag_ev.definition = cls.rsentence(2)
label_dim = mtag_ev.positions.append_set_dimension()
label_dim.labels = cls.rsentence(5).split(" ")
# reference all signals in the group
for siggroup in allsignalgroups:
mtag_ev.references.extend(siggroup)
# CHX
nixchx = blk.create_source(cls.rword(10),
"neo.channelindex")
nixchx.metadata = nix_blocks[0].metadata.create_section(
nixchx.name, "neo.channelindex.metadata"
)
chantype = "neo.channelindex"
# 3 channels
for idx in [2, 5, 9]:
channame = cls.rword(20)
nixrc = nixchx.create_source(channame, chantype)
nixrc.definition = cls.rsentence(13)
nixrc.metadata = nixchx.metadata.create_section(
nixrc.name, "neo.channelindex.metadata"
)
nixrc.metadata.create_property("index", nixio.Value(idx))
dims = tuple(map(nixio.Value, cls.rquant(3, 1)))
nixrc.metadata.create_property("coordinates", dims)
nixrc.metadata.create_property("coordinates.units",
nixio.Value("um"))
nunits = 1
stsperunit = np.array_split(allspiketrains, nunits)
for idx in range(nunits):
unitname = "{}-unit{}".format(cls.rword(5), idx)
nixunit = nixchx.create_source(unitname, "neo.unit")
nixunit.definition = cls.rsentence(4, 10)
for st in stsperunit[idx]:
st.sources.append(nixchx)
st.sources.append(nixunit)
# pick a few signal groups to reference this CHX
randsiggroups = np.random.choice(allsignalgroups, 5, False)
for siggroup in randsiggroups:
for sig in siggroup:
sig.sources.append(nixchx)
return nixfile
@staticmethod
def rdate():
return datetime(year=np.random.randint(1980, 2020),
month=np.random.randint(1, 13),
day=np.random.randint(1, 29))
@classmethod
def populate_dates(cls, obj):
obj.file_datetime = cls.rdate()
obj.rec_datetime = cls.rdate()
@staticmethod
def rword(n=10):
return "".join(np.random.choice(list(string.ascii_letters), n))
@classmethod
def rsentence(cls, n=3, maxwl=10):
return " ".join(cls.rword(np.random.randint(1, maxwl))
for _ in range(n))
@classmethod
def rdict(cls, nitems):
rd = dict()
for _ in range(nitems):
key = cls.rword()
value = cls.rword() if np.random.choice((0, 1)) \
else np.random.uniform()
rd[key] = value
return rd
@staticmethod
def rquant(shape, unit, incr=False):
try:
dim = len(shape)
except TypeError:
dim = 1
if incr and dim > 1:
raise TypeError("Shape of quantity array may only be "
"one-dimensional when incremental values are "
"requested.")
arr = np.random.random(shape)
if incr:
arr = np.array(np.cumsum(arr))
return arr*unit
@classmethod
def create_all_annotated(cls):
times = cls.rquant(1, pq.s)
signal = cls.rquant(1, pq.V)
blk = Block()
blk.annotate(**cls.rdict(3))
seg = Segment()
seg.annotate(**cls.rdict(4))
blk.segments.append(seg)
asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz)
asig.annotate(**cls.rdict(2))
seg.analogsignals.append(asig)
isig = IrregularlySampledSignal(times=times, signal=signal,
time_units=pq.s)
isig.annotate(**cls.rdict(2))
seg.irregularlysampledsignals.append(isig)
epoch = Epoch(times=times, durations=times)
epoch.annotate(**cls.rdict(4))
seg.epochs.append(epoch)
event = Event(times=times)
event.annotate(**cls.rdict(4))
seg.events.append(event)
spiketrain = SpikeTrain(times=times, t_stop=pq.s, units=pq.s)
d = cls.rdict(6)
d["quantity"] = pq.Quantity(10, "mV")
d["qarray"] = pq.Quantity(range(10), "mA")
spiketrain.annotate(**d)
seg.spiketrains.append(spiketrain)
chx = ChannelIndex(name="achx", index=[1, 2])
chx.annotate(**cls.rdict(5))
blk.channel_indexes.append(chx)
unit = Unit()
unit.annotate(**cls.rdict(2))
chx.units.append(unit)
return blk
class NixIOWriteTest(NixIOTest):
def setUp(self):
self.filename = "nixio_testfile_write.h5"
self.writer = NixIO(self.filename, "ow")
self.io = self.writer
self.reader = nixio.File.open(self.filename,
nixio.FileMode.ReadOnly)
def tearDown(self):
del self.writer
self.reader.close()
os.remove(self.filename)
def write_and_compare(self, blocks):
self.writer.write_all_blocks(blocks)
self.compare_blocks(self.writer.read_all_blocks(), self.reader.blocks)
def test_block_write(self):
block = Block(name=self.rword(),
description=self.rsentence())
self.write_and_compare([block])
block.annotate(**self.rdict(5))
self.write_and_compare([block])
def test_segment_write(self):
block = Block(name=self.rword())
segment = Segment(name=self.rword(), description=self.rword())
block.segments.append(segment)
self.write_and_compare([block])
segment.annotate(**self.rdict(2))
self.write_and_compare([block])
def test_channel_index_write(self):
block = Block(name=self.rword())
chx = ChannelIndex(name=self.rword(),
description=self.rsentence(),
index=[1, 2, 3, 5, 8, 13])
block.channel_indexes.append(chx)
self.write_and_compare([block])
chx.annotate(**self.rdict(3))
self.write_and_compare([block])
def test_signals_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
asig = AnalogSignal(signal=self.rquant((10, 3), pq.mV),
sampling_rate=pq.Quantity(10, "Hz"))
seg.analogsignals.append(asig)
self.write_and_compare([block])
anotherblock = Block("ir signal block")
seg = Segment("ir signal seg")
anotherblock.segments.append(seg)
irsig = IrregularlySampledSignal(
signal=np.random.random((20, 3)),
times=self.rquant(20, pq.ms, True),
units=pq.A
)
seg.irregularlysampledsignals.append(irsig)
self.write_and_compare([anotherblock])
block.segments[0].analogsignals.append(
AnalogSignal(signal=[10.0, 1.0, 3.0], units=pq.S,
sampling_period=pq.Quantity(3, "s"),
dtype=np.double, name="signal42",
description="this is an analogsignal",
t_start=45 * pq.ms),
)
self.write_and_compare([block, anotherblock])
block.segments[0].irregularlysampledsignals.append(
IrregularlySampledSignal(times=np.random.random(10),
signal=np.random.random((10, 3)),
units="mV", time_units="s",
                                     dtype=float,
name="some sort of signal",
description="the signal is described")
)
self.write_and_compare([block, anotherblock])
def test_epoch_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
epoch = Epoch(times=[1, 1, 10, 3]*pq.ms, durations=[3, 3, 3, 1]*pq.ms,
labels=np.array(["one", "two", "three", "four"]),
name="test epoch", description="an epoch for testing")
seg.epochs.append(epoch)
self.write_and_compare([block])
def test_event_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
event = Event(times=np.arange(0, 30, 10)*pq.s,
labels=np.array(["0", "1", "2"]),
name="event name",
description="event description")
seg.events.append(event)
self.write_and_compare([block])
def test_spiketrain_write(self):
block = Block()
seg = Segment()
block.segments.append(seg)
spiketrain = SpikeTrain(times=[3, 4, 5]*pq.s, t_stop=10.0,
name="spikes!", description="sssssspikes")
seg.spiketrains.append(spiketrain)
self.write_and_compare([block])
waveforms = self.rquant((20, 5, 10), pq.mV)
spiketrain = SpikeTrain(times=[1, 1.1, 1.2]*pq.ms, t_stop=1.5*pq.s,
name="spikes with wf",
description="spikes for waveform test",
waveforms=waveforms)
seg.spiketrains.append(spiketrain)
self.write_and_compare([block])
spiketrain.left_sweep = np.random.random(10)*pq.ms
self.write_and_compare([block])
def test_metadata_structure_write(self):
neoblk = self.create_all_annotated()
self.io.write_block(neoblk)
blk = self.io.nix_file.blocks[0]
blkmd = blk.metadata
self.assertEqual(blk.name, blkmd.name)
grp = blk.groups[0] # segment
self.assertIn(grp.name, blkmd.sections)
grpmd = blkmd.sections[grp.name]
for da in grp.data_arrays: # signals
name = ".".join(da.name.split(".")[:-1])
self.assertIn(name, grpmd.sections)
for mtag in grp.multi_tags: # spiketrains, events, and epochs
self.assertIn(mtag.name, grpmd.sections)
srcchx = blk.sources[0] # chx
self.assertIn(srcchx.name, blkmd.sections)
for srcunit in blk.sources: # units
self.assertIn(srcunit.name, blkmd.sections)
self.write_and_compare([neoblk])
def test_anonymous_objects_write(self):
nblocks = 2
nsegs = 2
nanasig = 4
nirrseg = 2
nepochs = 3
nevents = 4
nspiketrains = 3
nchx = 5
nunits = 10
times = self.rquant(1, pq.s)
signal = self.rquant(1, pq.V)
blocks = []
for blkidx in range(nblocks):
blk = Block()
blocks.append(blk)
for segidx in range(nsegs):
seg = Segment()
blk.segments.append(seg)
for anaidx in range(nanasig):
seg.analogsignals.append(AnalogSignal(signal=signal,
sampling_rate=pq.Hz))
for irridx in range(nirrseg):
seg.irregularlysampledsignals.append(
IrregularlySampledSignal(times=times,
signal=signal,
time_units=pq.s)
)
for epidx in range(nepochs):
seg.epochs.append(Epoch(times=times, durations=times))
for evidx in range(nevents):
seg.events.append(Event(times=times))
for stidx in range(nspiketrains):
seg.spiketrains.append(SpikeTrain(times=times, t_stop=pq.s,
units=pq.s))
for chidx in range(nchx):
chx = ChannelIndex(name="chx{}".format(chidx),
index=[1, 2])
blk.channel_indexes.append(chx)
for unidx in range(nunits):
unit = Unit()
chx.units.append(unit)
self.writer.write_all_blocks(blocks)
self.compare_blocks(blocks, self.reader.blocks)
def test_to_value(self):
section = self.io.nix_file.create_section("Metadata value test", "Test")
writeprop = self.io._write_property
# quantity
qvalue = pq.Quantity(10, "mV")
writeprop(section, "qvalue", qvalue)
self.assertEqual(section["qvalue"], 10)
self.assertEqual(section.props["qvalue"].unit, "mV")
# datetime
dt = self.rdate()
writeprop(section, "dt", dt)
self.assertEqual(datetime.fromtimestamp(section["dt"]), dt)
# string
randstr = self.rsentence()
writeprop(section, "randstr", randstr)
self.assertEqual(section["randstr"], randstr)
# bytes
bytestring = b"bytestring"
writeprop(section, "randbytes", bytestring)
self.assertEqual(section["randbytes"], bytestring.decode())
# iterables
randlist = np.random.random(10).tolist()
writeprop(section, "randlist", randlist)
self.assertEqual(randlist, section["randlist"])
randarray = np.random.random(10)
writeprop(section, "randarray", randarray)
np.testing.assert_almost_equal(randarray, section["randarray"])
# numpy item
npval = np.float64(2398)
writeprop(section, "npval", npval)
self.assertEqual(npval, section["npval"])
# number
val = 42
writeprop(section, "val", val)
self.assertEqual(val, section["val"])
        # multi-dimensional data -- UNSUPPORTED
# mdlist = [[1, 2, 3], [4, 5, 6]]
# writeprop(section, "mdlist", mdlist)
# mdarray = np.random.random((10, 3))
# writeprop(section, "mdarray", mdarray)
class NixIOReadTest(NixIOTest):
filename = "testfile_readtest.h5"
nixfile = None
nix_blocks = None
original_methods = dict()
@classmethod
def setUpClass(cls):
if HAVE_NIX:
cls.nixfile = cls.create_full_nix_file(cls.filename)
def setUp(self):
self.io = NixIO(self.filename, "ro")
self.original_methods["_read_cascade"] = self.io._read_cascade
self.original_methods["_update_maps"] = self.io._update_maps
@classmethod
def tearDownClass(cls):
if HAVE_NIX:
cls.nixfile.close()
def tearDown(self):
del self.io
def test_all_read(self):
neo_blocks = self.io.read_all_blocks(cascade=True, lazy=False)
nix_blocks = self.io.nix_file.blocks
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazyload_fullcascade_read(self):
neo_blocks = self.io.read_all_blocks(cascade=True, lazy=True)
nix_blocks = self.io.nix_file.blocks
# data objects should be empty
for block in neo_blocks:
for seg in block.segments:
for asig in seg.analogsignals:
self.assertEqual(len(asig), 0)
for isig in seg.irregularlysampledsignals:
self.assertEqual(len(isig), 0)
for epoch in seg.epochs:
self.assertEqual(len(epoch), 0)
for event in seg.events:
self.assertEqual(len(event), 0)
for st in seg.spiketrains:
self.assertEqual(len(st), 0)
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazyload_lazycascade_read(self):
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=True)
nix_blocks = self.io.nix_file.blocks
self.compare_blocks(neo_blocks, nix_blocks)
def test_lazycascade_read(self):
def getitem(self, index):
return self._data.__getitem__(index)
from neo.io.nixio import LazyList
getitem_original = LazyList.__getitem__
LazyList.__getitem__ = getitem
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=False)
for block in neo_blocks:
self.assertIsInstance(block.segments, LazyList)
self.assertIsInstance(block.channel_indexes, LazyList)
for seg in block.segments:
self.assertIsInstance(seg, string_types)
for chx in block.channel_indexes:
self.assertIsInstance(chx, string_types)
LazyList.__getitem__ = getitem_original
def test_load_lazy_cascade(self):
from neo.io.nixio import LazyList
neo_blocks = self.io.read_all_blocks(cascade="lazy", lazy=False)
for block in neo_blocks:
self.assertIsInstance(block.segments, LazyList)
self.assertIsInstance(block.channel_indexes, LazyList)
name = block.name
block = self.io.load_lazy_cascade("/" + name, lazy=False)
self.assertIsInstance(block.segments, list)
self.assertIsInstance(block.channel_indexes, list)
for seg in block.segments:
self.assertIsInstance(seg.analogsignals, list)
self.assertIsInstance(seg.irregularlysampledsignals, list)
self.assertIsInstance(seg.epochs, list)
self.assertIsInstance(seg.events, list)
self.assertIsInstance(seg.spiketrains, list)
def test_nocascade_read(self):
self.io._read_cascade = mock.Mock()
neo_blocks = self.io.read_all_blocks(cascade=False)
self.io._read_cascade.assert_not_called()
for block in neo_blocks:
self.assertEqual(len(block.segments), 0)
nix_block = self.io.nix_file.blocks[block.name]
self.compare_attr(block, nix_block)
def test_lazy_load_subschema(self):
blk = self.io.nix_file.blocks[0]
segpath = "/" + blk.name + "/segments/" + blk.groups[0].name
segment = self.io.load_lazy_cascade(segpath, lazy=True)
self.assertIsInstance(segment, Segment)
self.assertEqual(segment.name, blk.groups[0].name)
self.assertIs(segment.block, None)
self.assertEqual(len(segment.analogsignals[0]), 0)
segment = self.io.load_lazy_cascade(segpath, lazy=False)
self.assertEqual(np.shape(segment.analogsignals[0]), (100, 3))
class NixIOHashTest(NixIOTest):
def setUp(self):
self.hash = NixIO._hash_object
def _hash_test(self, objtype, argfuncs):
attr = {}
for arg, func in argfuncs.items():
attr[arg] = func()
obj_one = objtype(**attr)
obj_two = objtype(**attr)
hash_one = self.hash(obj_one)
hash_two = self.hash(obj_two)
self.assertEqual(hash_one, hash_two)
for arg, func in argfuncs.items():
chattr = attr.copy()
chattr[arg] = func()
obj_two = objtype(**chattr)
hash_two = self.hash(obj_two)
self.assertNotEqual(
hash_one, hash_two,
"Hash test failed with different '{}'".format(arg)
)
def test_block_seg_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"rec_datetime": self.rdate,
"file_datetime": self.rdate,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(Block, argfuncs)
self._hash_test(Segment, argfuncs)
self._hash_test(Unit, argfuncs)
def test_chx_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"index": lambda: np.random.random(10).tolist(),
"channel_names": lambda: self.rsentence(10).split(" "),
"coordinates": lambda: [(np.random.random() * pq.cm,
np.random.random() * pq.cm,
np.random.random() * pq.cm)]*10,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(ChannelIndex, argfuncs)
def test_analogsignal_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"signal": lambda: self.rquant((10, 10), pq.mV),
"sampling_rate": lambda: np.random.random() * pq.Hz,
"t_start": lambda: np.random.random() * pq.sec,
"t_stop": lambda: np.random.random() * pq.sec,
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(AnalogSignal, argfuncs)
def test_irregularsignal_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"signal": lambda: self.rquant((10, 10), pq.mV),
"times": lambda: self.rquant(10, pq.ms, True),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(IrregularlySampledSignal, argfuncs)
def test_event_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"times": lambda: self.rquant(10, pq.ms),
"durations": lambda: self.rquant(10, pq.ms),
"labels": lambda: self.rsentence(10).split(" "),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(Event, argfuncs)
self._hash_test(Epoch, argfuncs)
def test_spiketrain_hash(self):
argfuncs = {"name": self.rword,
"description": self.rsentence,
"times": lambda: self.rquant(10, pq.ms, True),
"t_start": lambda: -np.random.random() * pq.sec,
"t_stop": lambda: np.random.random() * 100 * pq.sec,
"waveforms": lambda: self.rquant((10, 10, 20), pq.mV),
# annotations
self.rword(): self.rword,
self.rword(): lambda: self.rquant((10, 10), pq.mV)}
self._hash_test(SpikeTrain, argfuncs)
class NixIOPartialWriteTest(NixIOTest):
filename = "testfile_partialwrite.h5"
nixfile = None
neo_blocks = None
original_methods = dict()
@classmethod
def setUpClass(cls):
if HAVE_NIX:
cls.nixfile = cls.create_full_nix_file(cls.filename)
def setUp(self):
self.io = NixIO(self.filename, "rw")
self.neo_blocks = self.io.read_all_blocks()
self.original_methods["_write_attr_annotations"] =\
self.io._write_attr_annotations
@classmethod
def tearDownClass(cls):
if HAVE_NIX:
cls.nixfile.close()
def tearDown(self):
self.restore_methods()
del self.io
def restore_methods(self):
for name, method in self.original_methods.items():
setattr(self.io, name, self.original_methods[name])
def _mock_write_attr(self, objclass):
typestr = str(objclass.__name__).lower()
self.io._write_attr_annotations = mock.Mock(
wraps=self.io._write_attr_annotations,
side_effect=self.check_obj_type("neo.{}".format(typestr))
)
neo_blocks = self.neo_blocks
self.modify_objects(neo_blocks, excludes=[objclass])
self.io.write_all_blocks(neo_blocks)
self.restore_methods()
def check_obj_type(self, typestring):
neq = self.assertNotEqual
def side_effect_func(*args, **kwargs):
obj = kwargs.get("nixobj", args[0])
if isinstance(obj, list):
for sig in obj:
neq(sig.type, typestring)
else:
neq(obj.type, typestring)
return side_effect_func
@classmethod
def modify_objects(cls, objs, excludes=()):
excludes = tuple(excludes)
for obj in objs:
if not (excludes and isinstance(obj, excludes)):
obj.description = cls.rsentence()
for container in getattr(obj, "_child_containers", []):
children = getattr(obj, container)
cls.modify_objects(children, excludes)
def test_partial(self):
for objclass in NixIO.supported_objects:
self._mock_write_attr(objclass)
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
def test_no_modifications(self):
self.io._write_attr_annotations = mock.Mock()
self.io.write_all_blocks(self.neo_blocks)
self.io._write_attr_annotations.assert_not_called()
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
# clearing hashes and checking again
for k in self.io._object_hashes.keys():
self.io._object_hashes[k] = None
self.io.write_all_blocks(self.neo_blocks)
self.io._write_attr_annotations.assert_not_called()
# changing hashes to force rewrite
for k in self.io._object_hashes.keys():
self.io._object_hashes[k] = "_"
self.io.write_all_blocks(self.neo_blocks)
callcount = self.io._write_attr_annotations.call_count
self.assertEqual(callcount, len(self.io._object_hashes))
self.compare_blocks(self.neo_blocks, self.io.nix_file.blocks)
@unittest.skipUnless(HAVE_NIX, "Requires NIX")
class CommonTests(BaseTestIO, unittest.TestCase):
ioclass = NixIO
| 2.015625 | 2 |
lib/taudataNlpTm.py | taudata-indonesia/elearning | 3 | 7178 | <reponame>taudata-indonesia/elearning
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 11:25:43 2019
@author: <NAME>
<EMAIL>
https://tau-data.id
~~Material & Code Usage Agreement (PPMC) - License:~~
* The Python modules and images used are the property of various sources, as stated in each module's license, caption, or watermark.
* Materials & code outside point (1) (i.e. this code & all ".ipynb" slides) used at tau-data may be used for academic purposes and other non-commercial activities.
* For purposes outside point (2), written permission is required from Taufik Edy Sutanto (hereinafter referred to as the author).
* Materials & code may not be published without the author's permission.
* Materials & code are provided "as-is", without warranty. The author is not responsible for their use outside official activities run by the author.
* By using these materials and code, the user agrees to this PPMC.
"""
import re, numpy as np
import itertools, nltk
from collections import Counter
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer;ps = PorterStemmer()
from itertools import chain
import warnings; warnings.simplefilter('ignore')
def lesk_wsd(sentence, ambiguous_word, pos=None, stem=True, hyperhypo=True):
# https://en.wikipedia.org/wiki/Lesk_algorithm
# https://stackoverflow.com/questions/20896278/word-sense-disambiguation-algorithm-in-python
max_overlaps = 0; lesk_sense = None
context_sentence = sentence.split()
for ss in wn.synsets(ambiguous_word):
#break
        if pos and ss.pos() != pos: # If POS is specified, skip synsets with a different POS.
continue
lesk_dictionary = []
lesk_dictionary+= ss.definition().replace('(','').replace(')','').split() # Includes definition.
lesk_dictionary+= ss.lemma_names() # Includes lemma_names.
# Optional: includes lemma_names of hypernyms and hyponyms.
if hyperhypo == True:
lesk_dictionary+= list(chain(*[i.lemma_names() for i in ss.hypernyms()+ss.hyponyms()]))
if stem == True: # Matching exact words causes sparsity, so lets match stems.
lesk_dictionary = [ps.stem(i) for i in lesk_dictionary]
context_sentence = [ps.stem(i) for i in context_sentence]
overlaps = set(lesk_dictionary).intersection(context_sentence)
if len(overlaps) > max_overlaps:
lesk_sense = ss
max_overlaps = len(overlaps)
return lesk_sense.name()
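# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# lesk_wsd() picks the WordNet sense whose gloss/lemma overlap with the sentence
# context is largest. The sentence and target word below are illustrative only;
# running this requires the NLTK WordNet data, and the returned synset name
# depends on that data.
def _demo_lesk_wsd():
    sentence = "I went to the bank to deposit my money"
    sense = lesk_wsd(sentence, "bank")  # e.g. a synset name such as 'depository_financial_institution.n.01'
    print("bank ->", sense)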
def words(text): return re.findall(r'\w+', text.lower())
corpus = 'data/corpus_sederhana.txt'
WORDS = Counter(words(open(corpus).read()))
def P(word):
"Probability of `word`."
N=sum(WORDS.values())
return WORDS[word] / N
def correction(word):
"Most probable spelling correction for word."
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
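# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# correction() returns the candidate with the highest unigram probability taken
# from the corpus loaded above, so the actual output depends entirely on that
# corpus. The misspelled word is an arbitrary illustration.
def _demo_correction():
    misspelled = "wrod"
    print(misspelled, "->", correction(misspelled))
    print("number of one-edit candidates:", len(edits1(misspelled)))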
def get_nMax(arr, n):
indices = arr.ravel().argsort()[-n:]
indices = (np.unravel_index(i, arr.shape) for i in indices)
return [(arr[i], i) for i in indices]
def filter_for_tags(tagged, tags=['NN', 'JJ', 'NNP']):
return [item for item in tagged if item[1] in tags]
def normalize(tagged):
return [(item[0].replace('.', ''), item[1]) for item in tagged]
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
        for element in itertools.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def lDistance(firstString, secondString):
"Function to find the Levenshtein distance between two words/sentences - gotten from http://rosettacode.org/wiki/Levenshtein_distance#Python"
if len(firstString) > len(secondString):
firstString, secondString = secondString, firstString
distances = range(len(firstString) + 1)
for index2, char2 in enumerate(secondString):
newDistances = [index2 + 1]
for index1, char1 in enumerate(firstString):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(1 + min((distances[index1], distances[index1+1], newDistances[-1])))
distances = newDistances
return distances[-1] | 2.625 | 3 |
reamber/base/MapSet.py | Eve-ning/reamber_base_py | 10 | 7179 | from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass, field
from typing import List, Iterator, TypeVar, Union, Any, Generic
import pandas as pd
from pandas.core.indexing import _LocIndexer
from reamber.base.Map import Map
from reamber.base.Property import stack_props
NoteListT = TypeVar('NoteListT')
HitListT = TypeVar('HitListT')
HoldListT = TypeVar('HoldListT')
BpmListT = TypeVar('BpmListT')
MapT = TypeVar('MapT')
@dataclass
class MapSet(Generic[NoteListT, HitListT, HoldListT, BpmListT, MapT]):
maps: List[MapT[NoteListT, HitListT, HoldListT, BpmListT]] = field(default_factory=lambda: [])
def __init__(self, maps: List[MapT[NoteListT, HitListT, HoldListT, BpmListT]]):
self.maps = maps
def __iter__(self) -> Iterator[MapT]:
for m in self.maps:
yield m
def items(self):
for m in self.maps:
yield m.__class__, m
def __getitem__(self, item: Union[Any, type]):
if isinstance(item, type):
# We want to index by type.
return [m[item][0] for m in self.maps]
else:
# We want to index by slice/int/etc.
return self.maps[item]
def __setitem__(self, key: Union[Any, type], value):
this = self[key]
assert len(this) == len(value), "The lengths of the set and get must be the same."
for i in range(len(this)): this[i] = value[i]
def deepcopy(self):
""" Returns a deep copy of itself """
return deepcopy(self)
def describe(self, rounding: int = 2, unicode: bool = False) -> List[str]:
""" Describes the map's attributes as a short summary
:param rounding: The decimal rounding
:param unicode: Whether to attempt to get the non-unicode or unicode. \
Doesn't attempt to translate.
"""
return [m.describe(rounding=rounding, unicode=unicode, s=self) for m in self]
def rate(self, by: float) -> MapSet:
""" Changes the rate of the map. Note that you need to do rate on the mapset to affect BPM.
:param by: The value to rate it by. 1.1x speeds up the song by 10%. Hence 10/11 of the length.
"""
copy = self.deepcopy()
copy.maps = [m.rate(by=by) for m in copy.maps]
return copy
# noinspection DuplicatedCode,PyUnresolvedReferences
@stack_props()
class Stacker:
""" This purpose of this class is to provide unnamed access to the lists.
This can make code much shorter as we don't have to deal with keyed dicts.
For example,
>>> m = Map.stack()
>>> m.offset *= 2
Or if you do it inline,
>>> m.stack().lengths *= 2
This will change the offsets of all lists that have the offset property.
This will change the map itself, as stack is a reference
This also is a "naive" system, so if the property, like column, doesn't exist
for Bpms, it will not break it. However, all properties must exist at least
once.
If the property isn't listed here, you can do string indexing
For example,
>>> m = Map.stack()
>>> m.other_property *= 2
"""
""" How does this work?
Firstly, if you concat a list of dfs, pd will always make a copy, so you have to
preserve the original dfs and also the stacked.
LISTS ---STACK---> COPY ---> STACKED
+---------- REFERENCE ---> UNSTACKED
The reason for stacking is so that we don't have to loop through all dfs to mutate.
If we did loop through the dfs, we have to stack them anyways, so it's as efficient.
However, it's just easier, by my eyes, to stack then attempt to mutate.
So, we keep 2 things in check, the unstacked, and the stacked.
However, we only can mutate the stacked one, then convert to the unstacked, because
the unstacked is the referenced.
Hence, we keep track of what partitions of the unstacked are each of the stacked.
IXS | | | | |
UNSTACKED [........] [........] [..] [....]
STACKED [...............................]
That's where ixs come in to help in converting the stacked values to unstacked.
So the workflow is that when we retrieve a value, it's always from the stacked.
Then, when it's mutated, it can be set and it will always call the _update
to update the referenced unstacked.
"""
stackers: List[Map.Stacker]
# noinspection PyProtectedMember
def __init__(self, stackers: List[Map.Stacker]):
self.stackers = stackers
def __getitem__(self, item):
return pd.DataFrame([i[item] for i in self.stackers])
def __setitem__(self, key, value):
for s, i in zip(self.stackers, value.iloc):
s[key] = i
_props = ['offset', 'column', 'length', 'bpm', 'metronome']
def stack(self, include: List[str] = None):
""" This creates a mutator for this instance, see Mutator for details. """
return self.Stacker([_.stack(include) for _ in self])
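# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# `my_mapset` below is assumed to be an instance of a game-specific MapSet
# subclass loaded elsewhere; the property names come from Stacker._props above.
#
#   stacked = my_mapset.stack()
#   stacked.offset += 100   # shift every object in every contained map by 100 ms
#   stacked.bpm *= 1.5      # scale every BPM point in every contained map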
| 2.5625 | 3 |
src/poretitioner/utils/filtering.py | uwmisl/poretitioner | 2 | 7180 | """
============
filtering.py
============
This module provides more granular filtering for captures.
You can customize your own filters too.
"""
from __future__ import annotations
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from json import JSONEncoder
from pathlib import PosixPath
from typing import (
Any,
Dict,
    Iterable,
    List,
Mapping,
NewType,
Optional,
Protocol,
Type,
TypedDict,
Union,
)
import h5py
import numpy as np
from h5py import File as Fast5File
from ..hdf5 import (
HasFast5,
HDF5_Group,
HDF5_GroupSerialableDataclass,
HDF5_GroupSerializable,
HDF5_GroupSerializing,
IsAttr,
)
from ..logger import Logger, getLogger
from ..signals import Capture
from .core import NumpyArrayLike, PathLikeOrString, ReadId, stripped_by_keys
from .plugin import Plugin
CaptureOrTimeSeries = Union[Capture, NumpyArrayLike]
# Unique identifier for a collection of filters (e.g. "ProfJeffsAwesomeFilters")
FilterSetId = NewType("FilterSetId", str)
# Unique identifier for an individual filter (e.g. "min_frac")
FilterName = NewType("FilterName", str)
__all__ = [
"does_pass_filters",
"get_filters",
"FilterName",
"FilterSetId",
"FilterConfig",
"Filter",
"Filters",
"DEFAULT_FILTER_PLUGINS",
"FilterSet",
"FilterConfigs",
"FilterPlugin",
"PATH",
]
@dataclass(frozen=True)
class FILTER_PATH:
ROOT = f"/Filter/"
@classmethod
def filter_set_path(cls, filter_set_id: FilterSetId) -> str:
filter_path = str(PosixPath(FILTER_PATH.ROOT, filter_set_id))
return filter_path
@classmethod
def filter_set_pass_path(cls, filter_set_id: FilterSetId) -> str:
pass_path = str(PosixPath(FILTER_PATH.filter_set_path(filter_set_id), "pass"))
return pass_path
@classmethod
def filter_set_pass_path_for_read_id(cls, filter_set_id: FilterSetId, read_id: ReadId) -> str:
pass_path = str(PosixPath(FILTER_PATH.filter_set_pass_path(filter_set_id), read_id))
return pass_path
class FilterConfig(TypedDict):
"""A blueprint for how to construct a FilterPlugin.
Contains a name, and any number of other attributes
Note on terminology:
- FilterConfig: A high-level description of a filter.
- FilterPlugin: An actual, callable, implementation of a FilterConfig.
    For custom plugins, make sure "filepath" is an attribute that points to the file to load.
"""
# Mapping of a FilterName to filter configurations.
FilterConfigs = NewType("FilterConfigs", Dict[FilterName, FilterConfig])
# TODO: Filter Plugin should check that name is unique. https://github.com/uwmisl/poretitioner/issues/91
class FilterPlugin(Plugin):
"""
Abstract class for Filter plugins. To write your own filter, subclass this abstract
class and implement the `apply` method and `name` property.
"""
@classmethod
@abstractmethod
def name(cls) -> str:
"""Unique name for this filter.
Make sure it doesn't conflict with any existing names.
Returns
-------
str
The unique name for this filter (e.g. "fourier_transform").
Raises
------
NotImplementedError
Raised if this filter is called without this name method being implemented.
"""
raise NotImplementedError(
"'name' class method not implemented for filter. This class method should return a unique name for this filter."
)
@abstractmethod
def apply(self, capture: CaptureOrTimeSeries) -> bool:
"""Returns True if a capture passes a given filter criteria.
For instance, a range filter would check that a capture's summary statistsics lie within a given range.
Parameters
----------
capture : np.typing.ArrayLike
Time series capture to filter.
Returns
-------
bool
Whether this capture passes the filter.
Raises
------
NotImplementedError
Raised when the filter method isn't implemented by the consuming Filter class
"""
raise NotImplementedError(
"'apply' method not implemented for filter. This method should return True if and only if applied to a capture that meets the filter criterion. For instance, "
)
def __call__(self, capture: CaptureOrTimeSeries) -> bool:
"""Apply the filter.
Defining `__call__` lets us do nice things like:
class MyCustomFilter(FilterPlugin):
def apply(capture):
# ...
pass
# Later in code where filtering is done....
valid_captures = []
filters = [ MyCustomFilter(), AnotherCustomFilter(), ... ]
valid_captures = [capture for capture in captures if all([filt(capture) for filt in filters])]
for capture in captures: # You'd want to parallelize this in a real life example...
for filt in filters:
filtered_captures = filt(capture).
Parameters
----------
capture : CaptureOrTimeSeries
Capture to filter.
Returns
-------
bool
Whether this capture passes the filter.
"""
result = self.apply(capture)
return result
RANGE_FILTER_DEFAULT_MINIMUM: float = -np.inf
RANGE_FILTER_DEFAULT_MAXIMUM: float = np.inf
class RangeFilter(FilterPlugin):
def __init__(self, minimum: Optional[float] = None, maximum: Optional[float] = None):
"""A filter that filters based on whether a signal falls between a maximum and a minimum.
Parameters
----------
minimum : float, optional
The smallest value this signal should be allowed to take (inclusive), by default RangeFilter.DEFAULT_MINIMUM
maximum : float, optional
The largest value this signal should be allowed to take (inclusive), by default RangeFilter.DEFAULT_MAXIMUM
"""
self.minimum = minimum if minimum is not None else RANGE_FILTER_DEFAULT_MINIMUM
self.maximum = maximum if maximum is not None else RANGE_FILTER_DEFAULT_MAXIMUM
def extract(self, capture: CaptureOrTimeSeries) -> NumpyArrayLike:
"""Extracts a summary statistic from the capture (e.g. mean, length, standard deviation).
Identity operation by default (just returns the capture).
You can use this function to transform the data in a useful way before processing it (e.g.
getting the mean value of a capture before filtering based on that mean.)
Note: If we picture the filtering workflow as an ETL (Extract-Transform-Load) pipeline, this would be the "transform"
(take data, modify it for a later purpose), but I feel that "transform" is perhaps a misleading function name in this context.
Parameters
----------
capture : CaptureOrTimeSeries
Capture from which to extract data.
"""
try:
signal = capture.fractionalized()
except AttributeError:
signal = capture
return signal
# signal = getattr(capture, Capture.fractionalized.__name__, capture)
def is_in_range(self, value: Union[NumpyArrayLike, float]) -> bool:
try:
# If the value is just a float, we can use this handy syntax:
return self.minimum <= value <= self.maximum
except ValueError:
# But we're not allowed to use that syntax on numpy arrays.
return all(np.logical_and(self.minimum <= value, value <= self.maximum))
def apply(self, signal):
value = self.extract(signal)
return self.is_in_range(value)
class StandardDeviationFilter(RangeFilter):
"""Filters for captures with standard deviations in some range."""
@classmethod
def name(cls) -> str:
return "stdv"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.std(signal)
class MeanFilter(RangeFilter):
"""Filters for captures with an arithmetic mean within a range."""
@classmethod
def name(cls) -> str:
return "mean"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.mean(signal)
class MedianFilter(RangeFilter):
"""Filters for captures with a median within a range."""
@classmethod
def name(cls) -> str:
return "median"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.median(signal)
class MinimumFilter(RangeFilter):
"""Filters for captures with a minimum within a range."""
@classmethod
def name(cls) -> str:
return "min"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.min(signal)
class MaximumFilter(RangeFilter):
"""Filters for captures with a maximum within a range."""
@classmethod
def name(cls) -> str:
return "max"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.max(signal)
class LengthFilter(RangeFilter):
"""Filters captures based on their length."""
@classmethod
def name(cls) -> str:
return "length"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return len(signal)
class EjectedFilter(FilterPlugin):
"""Filters captures based on whether they were ejected from the pore."""
@classmethod
def name(cls) -> str:
return "ejected"
def extract(self, capture: Capture):
return capture.ejected
"""
How to Create Your Own Custom Filter:
Need more advanced filtering than what we provide out of the box? No problem.
Create your own custom filter by inheriting from the FilterPlugin class.
For this example, let's do something complex. Say you only want to examine captures
that have at least 5 samples with a hyperbolic tangent greater than some threshold.
That means our custom filter's `apply` function should return True if and only if
the signal has at least 5 samples greater than the threshold, after taking the hyperbolic tangent in `extract`.
"""
class MyCustomFilter(FilterPlugin):
threshold: float = 0.5 # Totally arbitrary.
def name(self):
return "foo"
def extract(self, capture):
# Do the transformations here, or pre-process it before the filter.
# Gets the hyperbolic tangent of the signal.
extracted = np.tanh(capture.signal)
return extracted
def apply(self, signal):
        # Only return True if at least 5 samples have a hyperbolic tangent greater than the threshold (arbitrary).
extracted = self.extract(signal)
# If we want to filter out signals with fewer than 5 matching samples, then we
        # should return True when there are 5 or more matching samples.
n_meeting_threshold = len(
extracted[extracted > self.threshold]
) # Number of samples greater than the threshold
meets_criteria = (
n_meeting_threshold >= 5
) # Are there at least 5 samples meeting this threshold?
return meets_criteria
def apply_feature_filters(capture: CaptureOrTimeSeries, filters: List[FilterPlugin]) -> bool:
"""
Check whether an array of current values (i.e. a single nanopore capture)
passes a set of filters. Filters can be based on summary statistics
(e.g., mean) and/or a range of allowed values.
Notes on filter behavior: If the filters list is empty, there are no filters
and the capture passes.
Parameters
----------
capture : CaptureOrTimeSeries | NumpyArrayLike
Capture containing time series of nanopore current values for a single capture, or the signal itself.
filters : List[FilterPlugin]
List of FilterPlugin instances. Write your own filter by subclassing FilterPlugin.
Returns
-------
boolean
True if capture passes all filters; False otherwise.
"""
if filters is None:
filters = []
# TODO: Parallelize? https://github.com/uwmisl/poretitioner/issues/67
filtered = [filter_out(capture) for filter_out in filters]
print(filtered)
# Did this signal pass all filters?
all_passed = all(filtered)
return all_passed
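# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# The raw NumPy signal below stands in for a real Capture object; RangeFilter
# falls back to the raw values when the input has no fractionalized() method.
# The thresholds are arbitrary illustrations.
def _demo_apply_feature_filters() -> bool:
    signal = np.random.random(500)  # hypothetical capture signal
    my_filters = [
        LengthFilter(minimum=100),             # keep captures with at least 100 samples
        MeanFilter(minimum=0.0, maximum=1.0),  # keep captures whose mean lies in [0, 1]
    ]
    return apply_feature_filters(signal, my_filters)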
def check_capture_ejection_by_read(f5, read_id):
"""Checks whether the current capture was in the pore until the voltage
was reversed.
Parameters
----------
f5 : h5py.File object (open for reading or more)
Capture fast5 file
read_id : TODO
Returns
-------
boolean
True if the end of the capture coincides with the end of a voltage window.
"""
try:
ejected = f5.get(f"/read_{read_id}/Signal").attrs["ejected"]
except AttributeError:
raise ValueError(f"path /read_{read_id} does not exist in the fast5 file.")
return ejected
def check_capture_ejection(end_capture, voltage_ends, tol_obs=20):
"""Checks whether the current capture was in the pore until the voltage
was reversed.
Essentially checks whether a value (end_capture) is close enough (within
a margin of tol_obs) to any value in voltage_ends.
Parameters
----------
end_capture : numeric
The end time of the capture.
voltage_ends : list of numeric
List of times when the standard voltage ends.
tol_obs : int, optional
Tolerance for defining when the end of the capture = voltage end, by default 20
Returns
-------
boolean
True if the end of the capture coincides with the end of a voltage window.
"""
for voltage_end in voltage_ends:
if np.abs(end_capture - voltage_end) < tol_obs:
return True
return False
def filter_like_existing(config, example_fast5, example_filter_path, fast5_files, new_filter_path):
# Filters a set of fast5 files exactly the same as an existing filter
# TODO : #68 : implement
raise NotImplementedError()
def get_filter_pass_path(filter_set_id, read_id):
return FILTER_PATH.filter_set_pass_path(filter_set_id)
__DEFAULT_FILTER_PLUGINS = [
MeanFilter,
StandardDeviationFilter,
MedianFilter,
MinimumFilter,
MaximumFilter,
LengthFilter,
]
DEFAULT_FILTER_PLUGINS = {
filter_plugin_class.name(): filter_plugin_class
for filter_plugin_class in __DEFAULT_FILTER_PLUGINS
}
class Filtering(Protocol):
"""Classes that adhere to the Filtering protocol
provide an 'apply' method to an input that returns True
if and only if the input passes its filter.
These are also callable, so calling a filter on an input
is functionally equivalent to calling its apply method.
"""
def __call__(self, *args, **kwargs) -> bool:
raise NotImplementedError("Filtering protocol hasn't implemented __call__ yet!")
def apply(self, *args, **kwargs) -> bool:
raise NotImplementedError("Filtering protocol hasn't implemented Apply yet!")
@dataclass
class Filter(Filtering):
"""A named filter that can be applied to some data.
You can use this filter by just calling it on some data.
my_signal = [1,2,3,4]
filter = Filter(...)
passed_filter: bool = filter(my_signal)
Parameters
----------
config : FilterConfig
A description of this filter's configuration (e.g. where it was loaded from).
plugin : FilterPlugin
The actual implementation of this filter.
We have this class defined with
"""
config: FilterConfig
plugin: FilterPlugin
def __call__(self, *args, **kwargs) -> bool:
return self.plugin(*args, **kwargs)
def apply(self, *args, **kwargs) -> bool:
return self.plugin.apply(*args, **kwargs)
@property
def name(self) -> FilterName:
return FilterName(self.plugin.__class__.name())
def as_attr(self) -> Dict[str, Any]:
name = self.name
attrs = {**vars(self.config), **vars(self.plugin), name: name}
return attrs
def from_attr(self, attr) -> IsAttr:
...
import json
@dataclass
class HDF5_FilterSerialable(Filter, HDF5_GroupSerialableDataclass):
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
log = log if log is not None else getLogger()
# Note: This line simply registers a group with the name 'name' in the parent group.
this_group = HDF5_Group(parent_group.require_group(self.name))
all_attrs = {**self.config, **vars(self.plugin)}
this_group.create_attrs(all_attrs)
# Implementers must now write their serialized instance to this group.
return this_group
@classmethod
def from_group(
cls, group: HDF5_Group, log: Optional[Logger] = None
) -> HDF5_GroupSerialableDataclass:
# You see, the trouble is, in the above 'as_group' call, we lumped together
# all the attributes of the FilterConfig and the FilterPlugin, not knowing
# which attributes belonged to which class.
#
# Now, here in `from_group`, it's time to pay the piper and figure out which attribute
# goes where to create a new Filter instance.
#
# This is likely achievable through the plugin architecture, since the plugin's
# name is unique, we can try to find a plugin with a given name, then get its attributes from there.
# Load
log.warning("Filter.from_group not implemented...It's a whole thing (see comment)")
# This is pure <NAME>.
return super().from_group(group, log)
# class Filters(HDF5_GroupSerialableDataclass):
# filters:
Filters = Dict[FilterName, Filter]
def get_filters(filter_configs: Optional[FilterConfigs] = None) -> Filters:
"""Creates Filters from a list of filter configurations.
Parameters
----------
filter_configs : Optional[FilterConfigs]
A mapping of filter names to their configurations, None by default (i.e. no filtering).
Returns
-------
Filters
A set of callable/applyable filters.
"""
filter_configs = filter_configs if filter_configs is not None else FilterConfigs({})
my_filters = {
name: filter_from_config(name, filter_config)
for name, filter_config in filter_configs.items()
}
return my_filters
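# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# The filter names must match FilterPlugin.name() of one of the default plugins
# (see DEFAULT_FILTER_PLUGINS above); the thresholds are arbitrary illustrations.
def _demo_get_filters() -> Filters:
    filter_configs = FilterConfigs({
        FilterName("length"): {"minimum": 100},
        FilterName("stdv"): {"maximum": 0.5},
    })
    return get_filters(filter_configs)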
def does_pass_filters(capture: CaptureOrTimeSeries, filters: Iterable[Filter]) -> bool:
"""
Check whether an array of values (e.g. a single nanopore capture)
passes a set of filters. Filters can be based on summary statistics
(e.g., mean) and/or a range of allowed values.
Parameters
----------
capture : CaptureOrTimeSeries | NumpyArrayLike
Capture containing time series of nanopore current values for a single capture, or the signal itself.
filters : Iterable[Filter]
The set of filters to apply. Write your own filter by subclassing FilterPlugin.
Returns
-------
boolean
True if capture passes all filters; False otherwise.
"""
all_passed = True
for some_filter in filters:
if not some_filter(capture):
return False
return all_passed
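# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# Ties the two helpers together: build Filters from configs, then check whether
# a hypothetical signal passes all of them.
def _demo_does_pass_filters() -> bool:
    my_filters = get_filters(FilterConfigs({FilterName("length"): {"minimum": 100}}))
    signal = np.random.random(500)  # hypothetical capture signal
    return does_pass_filters(signal, my_filters.values())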
@dataclass(frozen=True)
class FilterSetProtocol(Filtering, Protocol):
filter_set_id: FilterSetId
filters: Filters
@classmethod
def from_filter_configs(cls, name: FilterSetId, filter_configs: FilterConfigs = None):
...
@dataclass(frozen=True, init=False)
class FilterSet(FilterSetProtocol):
"""
A collection of filters with a name for easy
identification. Essentially a mapping of filter names to their implementations.
"""
def validate(self):
raise NotImplementedError("Implement validation for filters!")
def __init__(self, filter_set_id: FilterSetId, filters: Filters) -> None:
filterset = super().__init__(self)
object.__setattr__(self, "filter_set_id", filter_set_id)
object.__setattr__(self, "filters", filters)
# self.name = name
# self.filters = filters
############################
#
# FilterSetProtocol
#
############################
@classmethod
def from_filter_configs(cls, name: FilterSetId, filter_configs: FilterConfigs = None):
filters: Filters = get_filters(filter_configs)
filter_set = cls.__new__(cls, name, filters)
filter_set.__init__(name, filters)
return filter_set
def apply(self, capture: CaptureOrTimeSeries) -> bool:
return does_pass_filters(capture, self.filters.values())
def __call__(self, capture: CaptureOrTimeSeries) -> bool:
return self.apply(capture)
class HDF5_FilterSet(FilterSet, HDF5_GroupSerialableDataclass):
def __init__(self, filter_set: FilterSet) -> None:
self._filterset = filter_set
############################
#
# HDF5_GroupSerializable
#
############################
def name(self):
return self._filterset.filter_set_id
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
filter_set_group = parent_group.require_group(self.name())
for name, filter_t in self._filterset.filters.items():
hdf5_filter = HDF5_FilterSerialable(filter_t.config, filter_t.plugin)
hdf5_filter.as_group(filter_set_group)
return HDF5_Group(filter_set_group)
# @classmethod
# def from_group(
# cls, group: HDF5_Group, log: Optional[Logger] = None
# ) -> HDF5_GroupSerializable:
# raise NotImplementedError(
# f"from_group not implemented for {cls.__name__}. Make sure you write a method that returns a serialzied version of this object."
# )
def filter_from_config(name: str, config: FilterConfig, log: Logger = getLogger()) -> Filter:
"""Creates a Filter from a config spefication. If no "filename" is present in the FilterConfig, it's
assumed to be one of the default filtesr
Parameters
----------
name : str
The unique name of a filter.
config : FilterConfig
Filter configuration to build the plugin.
log : Logger, optional
Logger to use for information/warnings/debug, by default getLogger()
Returns
-------
Filter
A filter that can be applied to some data.
Raises
------
AttributeError
A filter plugin could not be built from the configuration description. If this error is raised, be sure to check
1) A plugin class with the name in the configuration is defined at the filepath described in the configuration
2) The plugin class inherits from the `FilterPlugin` abstract base class.
"""
filepath = config.get("filepath", None)
# TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
plugin = None
if name in DEFAULT_FILTER_PLUGINS:
plugin = DEFAULT_FILTER_PLUGINS[name]()
else:
# TODO: For non-default FilterPlugins, load the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
plugin = plugin_from_file(name, filepath)
pass
# Make sure any plugin attributes defined in the config are moved over to the plugin instance.
try:
# Here, we take care of setting whatever attributes the plugin config defines on the new plugin instance.
for key, value in config.items():
object.__setattr__(plugin, key, value)
except AttributeError as e:
log.warning(
"""
Uh oh, couldn't find plugin '{name}'. Are you sure:
1) A plugin class with the name '{name}' is defined in the file {filepath}?
2) That plugin class inherits from `FilterPlugin`?
"""
)
raise e
my_filter = Filter(config, plugin)
return my_filter
def plugin_from_file(name: str, filepath: PathLikeOrString):
"""[summary]
Parameters
----------
name : str
[description]
filepath : PathLikeOrString
[description]
Returns
-------
[type]
[description]
Raises
------
NotImplementedError
[description]
"""
# TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
raise NotImplementedError(
"Plugin from file has not been implemented! This method should take in a filepath and filter name, and return a runnable FilterPlugin!"
)
| 2.109375 | 2 |
Chapter 11/wrong_type.py | nescience8/starting-out-with-python-global-4th-edition | 35 | 7181 | <gh_stars>10-100
def main():
# Pass a string to show_mammal_info...
show_mammal_info('I am a string')
# The show_mammal_info function accepts an object
# as an argument, and calls its show_species
# and make_sound methods.
def show_mammal_info(creature):
creature.show_species()
creature.make_sound()
# Call the main function.
main()
| 3.078125 | 3 |
fase2-exercicios/cap2/lista-de-exercicios/RM94336_EX04.py | Leodf/FIAP | 0 | 7182 | <gh_stars>0
"""
4 – A major client of yours has suffered a hacker attack: the server was hijacked by malicious software, which encrypted all the disks and asks for a password to be typed in to release the machine. And, of course, the criminals demand a payment to reveal the password.
After analyzing the code of their program, however, you discover that the password is made up of the word "LIBERDADE" followed by the factorial of the minutes the machine is showing at the moment the password is typed (if the machine is showing 5 minutes, the password will be <PASSWORD>). Write a program that receives the current minutes from the user and displays on screen the password required to unlock the machine. ATTENTION: your program may not use ready-made functions to compute the factorial. It must use a loop.
"""
print('\nProgram to generate the unlock password for the server hit by the hacker attack!!!\n')
print('We discovered that the password is the word LIBERDADE + the factorial of the minutes shown on your computer.\n')
minuto = input('Enter the minutes shown on this computer: ')
minuto = int(minuto)
fatorial = 1
for i in range(minuto, 0, -1):
    fatorial *= i
print(f'\nThe password you need to type is LIB<PASSWORD>{fatorial} to unlock the server.\nWarning!!!: you have 60 valid seconds before the password changes again!!!\n') | 3.671875 | 4
dosagelib/helpers.py | yasen-m/dosage | 0 | 7183 | <reponame>yasen-m/dosage
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 <NAME> and <NAME>
# Copyright (C) 2012-2014 <NAME>
from .util import fetchUrl, getPageContent, getQueryParams
def queryNamer(paramName, usePageUrl=False):
"""Get name from URL query part."""
@classmethod
def _namer(cls, imageUrl, pageUrl):
"""Get URL query part."""
url = pageUrl if usePageUrl else imageUrl
return getQueryParams(url)[paramName][0]
return _namer
def regexNamer(regex, usePageUrl=False):
"""Get name from regular expression."""
@classmethod
def _namer(cls, imageUrl, pageUrl):
"""Get first regular expression group."""
url = pageUrl if usePageUrl else imageUrl
mo = regex.search(url)
if mo:
return mo.group(1)
return _namer
def bounceStarter(url, nextSearch):
"""Get start URL by "bouncing" back and forth one time."""
@classmethod
def _starter(cls):
"""Get bounced start URL."""
data, baseUrl = getPageContent(url, cls.session)
url1 = fetchUrl(url, data, baseUrl, cls.prevSearch)
data, baseUrl = getPageContent(url1, cls.session)
return fetchUrl(url1, data, baseUrl, nextSearch)
return _starter
def indirectStarter(url, latestSearch):
"""Get start URL by indirection."""
@classmethod
def _starter(cls):
"""Get indirect start URL."""
data, baseUrl = getPageContent(url, cls.session)
return fetchUrl(url, data, baseUrl, latestSearch)
return _starter
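# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# These factories are meant to be assigned as class attributes of a scraper
# definition elsewhere in dosagelib; the class name, URLs and regular expression
# below are illustrative only.
#
#   class SomeComic(_BasicScraper):
#       url = 'http://www.example.com/comics/'
#       stripUrl = url + 'strip.php?id=%s'
#       namer = queryNamer('id')
#       starter = indirectStarter(url, re.compile(r'href="(/comics/\d+)"'))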
| 2.1875 | 2 |
research/object_detection/data_decoders/tf_example_decoder_test.py | akshit-protonn/models | 18 | 7184 | <gh_stars>10-100
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.data_decoders.tf_example_decoder."""
import os
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util
from object_detection.utils import test_case
class TfExampleDecoderTest(test_case.TestCase):
def _create_encoded_and_decoded_data(self, data, encoding_type):
if encoding_type == 'jpeg':
encode_fn = tf.image.encode_jpeg
decode_fn = tf.image.decode_jpeg
elif encoding_type == 'png':
encode_fn = tf.image.encode_png
decode_fn = tf.image.decode_png
else:
raise ValueError('Invalid encoding type.')
def prepare_data_fn():
encoded_data = encode_fn(data)
decoded_data = decode_fn(encoded_data)
return encoded_data, decoded_data
return self.execute_cpu(prepare_data_fn, [])
def testDecodeAdditionalChannels(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg')
additional_channel = np.random.randint(256, size=(4, 5, 1)).astype(np.uint8)
(encoded_additional_channel,
decoded_additional_channel) = self._create_encoded_and_decoded_data(
additional_channel, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/additional_channels/encoded':
dataset_util.bytes_list_feature(
[encoded_additional_channel] * 2),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_additional_channels=2)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
np.concatenate([decoded_additional_channel] * 2, axis=2),
tensor_dict[fields.InputDataFields.image_additional_channels])
def testDecodeJpegImage(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, decoded_jpeg = self._create_encoded_and_decoded_data(
image, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.image].get_shape().as_list()),
[None, None, 3])
self.assertAllEqual(
(output[fields.InputDataFields.original_image_spatial_shape]
.get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields.
original_image_spatial_shape])
self.assertEqual(
six.b('image_id'), tensor_dict[fields.InputDataFields.source_id])
def testDecodeImageKeyAndFilename(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(image, 'jpeg')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/key/sha256':
dataset_util.bytes_feature(six.b('abc')),
'image/filename':
dataset_util.bytes_feature(six.b('filename'))
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertEqual(six.b('abc'), tensor_dict[fields.InputDataFields.key])
self.assertEqual(
six.b('filename'), tensor_dict[fields.InputDataFields.filename])
def testDecodePngImage(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png, decoded_png = self._create_encoded_and_decoded_data(
image, 'png')
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id'))
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.image].get_shape().as_list()),
[None, None, 3])
self.assertAllEqual(
(output[fields.InputDataFields.original_image_spatial_shape]
.get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(decoded_png, tensor_dict[fields.InputDataFields.image])
self.assertAllEqual([4, 5], tensor_dict[fields.InputDataFields.
original_image_spatial_shape])
self.assertEqual(
six.b('image_id'), tensor_dict[fields.InputDataFields.source_id])
def testDecodePngInstanceMasks(self):
image = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_png, _ = self._create_encoded_and_decoded_data(image, 'png')
mask_1 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
mask_2 = np.random.randint(0, 2, size=(10, 10, 1)).astype(np.uint8)
encoded_png_1, _ = self._create_encoded_and_decoded_data(mask_1, 'png')
decoded_png_1 = np.squeeze(mask_1.astype(np.float32))
encoded_png_2, _ = self._create_encoded_and_decoded_data(mask_2, 'png')
decoded_png_2 = np.squeeze(mask_2.astype(np.float32))
encoded_masks = [encoded_png_1, encoded_png_2]
decoded_masks = np.stack([decoded_png_1, decoded_png_2])
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True,
instance_mask_type=input_reader_pb2.PNG_MASKS)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
decoded_masks,
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
def testDecodeEmptyPngInstanceMasks(self):
image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
encoded_png, _ = self._create_encoded_and_decoded_data(image_tensor, 'png')
encoded_masks = []
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_png),
'image/format':
dataset_util.bytes_feature(six.b('png')),
'image/object/mask':
dataset_util.bytes_list_feature(encoded_masks),
'image/height':
dataset_util.int64_feature(10),
'image/width':
dataset_util.int64_feature(10),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True,
instance_mask_type=input_reader_pb2.PNG_MASKS)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
[0, 10, 10])
def testDecodeBoundingBox(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
def testDecodeKeypointDepth(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
keypoint_depths = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
keypoint_depth_weights = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/z':
dataset_util.float_list_feature(keypoint_depths),
'image/object/keypoint/z/weights':
dataset_util.float_list_feature(keypoint_depth_weights),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_keypoints=3, load_keypoint_depth_features=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_keypoint_depths].get_shape(
).as_list()), [2, 3])
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_keypoint_depth_weights]
.get_shape().as_list()), [2, 3])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_keypoint_depths = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
self.assertAllClose(
expected_keypoint_depths,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths])
expected_keypoint_depth_weights = [[1.0, 0.9, 0.8], [0.7, 0.6, 0.5]]
self.assertAllClose(
expected_keypoint_depth_weights,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depth_weights])
def testDecodeKeypointDepthNoDepth(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
num_keypoints=3, load_keypoint_depth_features=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_keypoints_depth_default = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
self.assertAllClose(
expected_keypoints_depth_default,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depths])
self.assertAllClose(
expected_keypoints_depth_default,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_depth_weights])
def testDecodeKeypoint(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
keypoint_visibility = [1, 2, 0, 1, 0, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
'image/object/keypoint/visibility':
dataset_util.int64_list_feature(keypoint_visibility),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[2, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = [
[[0.0, 1.0], [1.0, 2.0], [np.nan, np.nan]],
[[3.0, 4.0], [np.nan, np.nan], [5.0, 6.0]]]
self.assertAllClose(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = (
(np.array(keypoint_visibility) > 0).reshape((2, 3)))
self.assertAllEqual(
expected_visibility,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities])
def testDecodeKeypointNoVisibilities(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
keypoint_ys = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
keypoint_xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/keypoint/y':
dataset_util.float_list_feature(keypoint_ys),
'image/object/keypoint/x':
dataset_util.float_list_feature(keypoint_xs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(num_keypoints=3)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_keypoints].get_shape().as_list()),
[2, 3, 2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
expected_boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
expected_keypoints = (
np.vstack([keypoint_ys, keypoint_xs]).transpose().reshape((2, 3, 2)))
self.assertAllEqual(
expected_keypoints,
tensor_dict[fields.InputDataFields.groundtruth_keypoints])
expected_visibility = np.ones((2, 3))
self.assertAllEqual(
expected_visibility,
tensor_dict[fields.InputDataFields.groundtruth_keypoint_visibilities])
def testDecodeDefaultGroundtruthWeights(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_boxes].get_shape().as_list()),
[None, 4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllClose(tensor_dict[fields.InputDataFields.groundtruth_weights],
np.ones(2, dtype=np.float32))
def testDecodeObjectLabel(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeMultiClassScores(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
flattened_multiclass_scores = [100., 50.] + [20., 30.]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/multiclass_scores':
dataset_util.float_list_feature(
flattened_multiclass_scores),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_multiclass_scores=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(flattened_multiclass_scores,
tensor_dict[fields.InputDataFields.multiclass_scores])
def testDecodeEmptyMultiClassScores(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_multiclass_scores=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertEqual(
(0,), tensor_dict[fields.InputDataFields.multiclass_scores].shape)
def testDecodeObjectLabelNoText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes = [1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithText(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
# Annotation label gets overridden by labelmap id.
annotated_bbox_classes = [3, 4]
expected_bbox_classes = [1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/class/label':
dataset_util.int64_list_feature(annotated_bbox_classes),
})).SerializeToString()
label_map_string = """
item {
id:1
name:'cat'
}
item {
id:2
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(expected_bbox_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('cheetah')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:2
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([2, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
display_name:'cat'
}
item {
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelUnrecognizedNameWithMappingWithDisplayName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('cheetah')]
bbox_classes_id = [5, 6]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/class/label':
dataset_util.int64_list_feature(bbox_classes_id),
})).SerializeToString()
label_map_string = """
item {
name:'/m/cat'
id:3
display_name:'cat'
}
item {
name:'/m/dog'
id:1
display_name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, -1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectLabelWithMappingWithName(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_classes_text = [six.b('cat'), six.b('dog')]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual([3, 1],
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testDecodeObjectArea(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_area = [100., 174.]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/area':
dataset_util.float_list_feature(object_area),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_area].get_shape().as_list()), [2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(object_area,
tensor_dict[fields.InputDataFields.groundtruth_area])
def testDecodeVerifiedNegClasses(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
neg_category_ids = [0, 5, 8]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/neg_category_ids':
dataset_util.int64_list_feature(neg_category_ids),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
neg_category_ids,
tensor_dict[fields.InputDataFields.groundtruth_verified_neg_classes])
def testDecodeNotExhaustiveClasses(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
not_exhaustive_category_ids = [0, 5, 8]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/not_exhaustive_category_ids':
dataset_util.int64_list_feature(
not_exhaustive_category_ids),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
not_exhaustive_category_ids,
tensor_dict[fields.InputDataFields.groundtruth_not_exhaustive_classes])
def testDecodeObjectIsCrowd(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_is_crowd = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/is_crowd':
dataset_util.int64_list_feature(object_is_crowd),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_is_crowd].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_is_crowd],
tensor_dict[fields.InputDataFields.groundtruth_is_crowd])
def testDecodeObjectDifficult(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_difficult = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/difficult':
dataset_util.int64_list_feature(object_difficult),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_difficult].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_difficult],
tensor_dict[fields.InputDataFields.groundtruth_difficult])
def testDecodeObjectGroupOf(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_group_of = [0, 1]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/group_of':
dataset_util.int64_list_feature(object_group_of),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_group_of].get_shape().as_list()),
[2])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
[bool(item) for item in object_group_of],
tensor_dict[fields.InputDataFields.groundtruth_group_of])
def testDecodeObjectWeight(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
object_weights = [0.75, 1.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/weight':
dataset_util.float_list_feature(object_weights),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_weights].get_shape().as_list()),
[None])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(object_weights,
tensor_dict[fields.InputDataFields.groundtruth_weights])
def testDecodeClassConfidence(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
class_confidence = [0.0, 1.0, 0.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/class/confidence':
dataset_util.float_list_feature(class_confidence),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_image_confidences]
.get_shape().as_list()), [3])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
class_confidence,
tensor_dict[fields.InputDataFields.groundtruth_image_confidences])
def testDecodeInstanceSegmentation(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
self.assertAllEqual(
(output[fields.InputDataFields.groundtruth_instance_masks].get_shape(
).as_list()), [4, 5, 3])
self.assertAllEqual((output[
fields.InputDataFields.groundtruth_classes].get_shape().as_list()),
[4])
return output
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllEqual(
instance_masks.astype(np.float32),
tensor_dict[fields.InputDataFields.groundtruth_instance_masks])
self.assertAllEqual(object_classes,
tensor_dict[fields.InputDataFields.groundtruth_classes])
def testInstancesNotAvailableByDefault(self):
num_instances = 4
image_height = 5
image_width = 3
# Randomly generate image.
image_tensor = np.random.randint(
256, size=(image_height, image_width, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
# Randomly generate instance segmentation masks.
instance_masks = (
np.random.randint(2, size=(num_instances, image_height,
image_width)).astype(np.float32))
instance_masks_flattened = np.reshape(instance_masks, [-1])
# Randomly generate class labels for each instance.
object_classes = np.random.randint(
100, size=(num_instances)).astype(np.int64)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/object/mask':
dataset_util.float_list_feature(instance_masks_flattened),
'image/object/class/label':
dataset_util.int64_list_feature(object_classes)
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
tensor_dict)
def testDecodeImageLabels(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
def graph_fn_1():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
'image/format': dataset_util.bytes_feature(six.b('jpeg')),
'image/class/label': dataset_util.int64_list_feature([1, 2]),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn_1, [])
self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 2]))
def graph_fn_2():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/class/text':
dataset_util.bytes_list_feature(
[six.b('dog'), six.b('cat')]),
})).SerializeToString()
label_map_string = """
item {
id:3
name:'cat'
}
item {
id:1
name:'dog'
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn_2, [])
self.assertIn(fields.InputDataFields.groundtruth_image_classes, tensor_dict)
self.assertAllEqual(
tensor_dict[fields.InputDataFields.groundtruth_image_classes],
np.array([1, 3]))
def testDecodeContextFeatures(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
num_features = 8
context_feature_length = 10
context_features = np.random.random(num_features*context_feature_length)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_feature(context_feature_length),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_context_features=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertAllClose(
context_features.reshape(num_features, context_feature_length),
tensor_dict[fields.InputDataFields.context_features])
self.assertAllEqual(
context_feature_length,
tensor_dict[fields.InputDataFields.context_feature_length])
def testContextFeaturesNotAvailableByDefault(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
num_features = 10
context_feature_length = 10
context_features = np.random.random(num_features*context_feature_length)
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/context_features':
dataset_util.float_list_feature(context_features),
'image/context_feature_length':
dataset_util.int64_feature(context_feature_length),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder()
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
self.assertNotIn(fields.InputDataFields.context_features,
tensor_dict)
def testExpandLabels(self):
label_map_string = """
item {
id:1
name:'cat'
ancestor_ids: 2
}
item {
id:2
name:'animal'
descendant_ids: 1
}
item {
id:3
name:'man'
ancestor_ids: 5
}
item {
id:4
name:'woman'
display_name:'woman'
ancestor_ids: 5
}
item {
id:5
name:'person'
descendant_ids: 3
descendant_ids: 4
}
"""
label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
f.write(label_map_string)
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0]
bbox_xmins = [1.0, 5.0]
bbox_ymaxs = [2.0, 6.0]
bbox_xmaxs = [3.0, 7.0]
bbox_classes_text = [six.b('cat'), six.b('cat')]
bbox_group_of = [0, 1]
image_class_text = [six.b('cat'), six.b('person')]
image_confidence = [1.0, 0.0]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/class/text':
dataset_util.bytes_list_feature(bbox_classes_text),
'image/object/group_of':
dataset_util.int64_list_feature(bbox_group_of),
'image/class/text':
dataset_util.bytes_list_feature(image_class_text),
'image/class/confidence':
dataset_util.float_list_feature(image_confidence),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
label_map_proto_file=label_map_path, expand_hierarchy_labels=True)
return example_decoder.decode(tf.convert_to_tensor(example))
tensor_dict = self.execute_cpu(graph_fn, [])
boxes = np.vstack([bbox_ymins, bbox_xmins, bbox_ymaxs,
bbox_xmaxs]).transpose()
expected_boxes = np.stack(
[boxes[0, :], boxes[0, :], boxes[1, :], boxes[1, :]], axis=0)
expected_boxes_class = np.array([1, 2, 1, 2])
expected_boxes_group_of = np.array([0, 0, 1, 1])
expected_image_class = np.array([1, 2, 3, 4, 5])
expected_image_confidence = np.array([1.0, 1.0, 0.0, 0.0, 0.0])
self.assertAllEqual(expected_boxes,
tensor_dict[fields.InputDataFields.groundtruth_boxes])
self.assertAllEqual(expected_boxes_class,
tensor_dict[fields.InputDataFields.groundtruth_classes])
self.assertAllEqual(
expected_boxes_group_of,
tensor_dict[fields.InputDataFields.groundtruth_group_of])
self.assertAllEqual(
expected_image_class,
tensor_dict[fields.InputDataFields.groundtruth_image_classes])
self.assertAllEqual(
expected_image_confidence,
tensor_dict[fields.InputDataFields.groundtruth_image_confidences])
def testDecodeDensePose(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0, 2.0]
bbox_xmins = [1.0, 5.0, 8.0]
bbox_ymaxs = [2.0, 6.0, 1.0]
bbox_xmaxs = [3.0, 7.0, 3.3]
densepose_num = [0, 4, 2]
densepose_part_index = [2, 2, 3, 4, 2, 9]
densepose_x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
densepose_y = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
densepose_u = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06]
densepose_v = [0.99, 0.98, 0.97, 0.96, 0.95, 0.94]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/densepose/num':
dataset_util.int64_list_feature(densepose_num),
'image/object/densepose/part_index':
dataset_util.int64_list_feature(densepose_part_index),
'image/object/densepose/x':
dataset_util.float_list_feature(densepose_x),
'image/object/densepose/y':
dataset_util.float_list_feature(densepose_y),
'image/object/densepose/u':
dataset_util.float_list_feature(densepose_u),
'image/object/densepose/v':
dataset_util.float_list_feature(densepose_v),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_dense_pose=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
dp_num_points = output[fields.InputDataFields.groundtruth_dp_num_points]
dp_part_ids = output[fields.InputDataFields.groundtruth_dp_part_ids]
dp_surface_coords = output[
fields.InputDataFields.groundtruth_dp_surface_coords]
return dp_num_points, dp_part_ids, dp_surface_coords
dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu(
graph_fn, [])
expected_dp_num_points = [0, 4, 2]
expected_dp_part_ids = [
[0, 0, 0, 0],
[2, 2, 3, 4],
[2, 9, 0, 0]
]
expected_dp_surface_coords = np.array(
[
# Instance 0 (no points).
[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
# Instance 1 (4 points).
[[0.9, 0.1, 0.99, 0.01],
[0.8, 0.2, 0.98, 0.02],
[0.7, 0.3, 0.97, 0.03],
[0.6, 0.4, 0.96, 0.04]],
# Instance 2 (2 points).
[[0.5, 0.5, 0.95, 0.05],
[0.4, 0.6, 0.94, 0.06],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
], dtype=np.float32)
self.assertAllEqual(dp_num_points, expected_dp_num_points)
self.assertAllEqual(dp_part_ids, expected_dp_part_ids)
self.assertAllClose(dp_surface_coords, expected_dp_surface_coords)
def testDecodeTrack(self):
image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
encoded_jpeg, _ = self._create_encoded_and_decoded_data(
image_tensor, 'jpeg')
bbox_ymins = [0.0, 4.0, 2.0]
bbox_xmins = [1.0, 5.0, 8.0]
bbox_ymaxs = [2.0, 6.0, 1.0]
bbox_xmaxs = [3.0, 7.0, 3.3]
track_labels = [0, 1, 2]
def graph_fn():
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/object/bbox/ymin':
dataset_util.float_list_feature(bbox_ymins),
'image/object/bbox/xmin':
dataset_util.float_list_feature(bbox_xmins),
'image/object/bbox/ymax':
dataset_util.float_list_feature(bbox_ymaxs),
'image/object/bbox/xmax':
dataset_util.float_list_feature(bbox_xmaxs),
'image/object/track/label':
dataset_util.int64_list_feature(track_labels),
})).SerializeToString()
example_decoder = tf_example_decoder.TfExampleDecoder(
load_track_id=True)
output = example_decoder.decode(tf.convert_to_tensor(example))
track_ids = output[fields.InputDataFields.groundtruth_track_ids]
return track_ids
track_ids = self.execute_cpu(graph_fn, [])
expected_track_labels = [0, 1, 2]
self.assertAllEqual(track_ids, expected_track_labels)
if __name__ == '__main__':
tf.test.main()
| 2.234375 | 2 |
counting_capitals.py | m10singh94/Python-programs | 0 | 7185 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 08:09:31 2020
@author: <NAME>
"""
def count_capitals(string):
count = 0
for ch in string:
if ord(ch) >= 65 and ord(ch) <= 90:
count += 1
return count
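# Illustrative check (not part of the original script): 'Hello World' has
# exactly two characters in the ASCII range 65-90 ('H' and 'W'), while a
# fully lower-case string has none.
#   >>> count_capitals("Hello World")
#   2
#   >>> count_capitals("all lower case")
#   0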
def remove_substring_everywhere(string, substring):
'''
Remove all occurrences of substring from string, and return
the resulting string. Both arguments must be strings.
'''
    lsub = len(substring)            # length of the substring
    newstr = ''
    i = 0                            # start of the next segment to copy
    p = string.find(substring)
    while p != -1:
        newstr += string[i:p]        # keep the text before this occurrence
        i = p + lsub                 # skip over the occurrence itself
        p = string.find(substring, i)
    newstr += string[i:]             # keep the tail after the last occurrence
return newstr | 3.921875 | 4 |
admit/at/GenerateSpectrum_AT.py | astroumd/admit | 4 | 7186 | """ .. _GenerateSpectrum-at-api:
**GenerateSpectrum_AT** --- Generates synthetic test spectra.
-------------------------------------------------------------
This module defines the GenerateSpectrum_AT class.
"""
from admit.AT import AT
import admit.util.bdp_types as bt
from admit.bdp.CubeSpectrum_BDP import CubeSpectrum_BDP
import admit.util.filter.Filter1D as Filter1D
import admit.util.Table as Table
import admit.util.utils as utils
from admit.util import APlot
import admit.util.Image as Image
from admit.util import SpectralLineSearch
from admit.Summary import SummaryEntry
import os
import numpy as np
from copy import deepcopy
class GenerateSpectrum_AT(AT):
""" Define a synthetic CubeSpectrum for testing.
This task is only intended to generate synthetic spectra with noise, and
to efficiently test LineID with a CubeSpectrum_BDP input. You can add continuum
    as well, and add any number of Gaussians, or even optionally read
    in an ASCII spectrum and add noise and Gaussians to it. Multiple spectra
can be in the CubeSpectrum_BDP.
When noise is added, spectra have a fixed RMS = 1.0, i.e. spectra are assumed
to be in dimensionless S/N units.
**Keywords**
**file**: string
Name of an ASCII file that contains a spectrum, optional. The first column must be frequency
and the second column must be the intensity. If you just want to read a spectrum
and not add noise, set seed=-1.
Default: blank.
**nchan**: int
Number of output channels per spectrum. Ignored when file= given.
Default: 1000.
**nspectra**: int
          Number of output spectra. Values larger than one produce different
random realizations of the input conditions (either from file=
and/or from lines= via seed=), but they are all written to the same
BDP.
Default: 1.
**seed**: int
Seed for random number generator.
0 is a special value that uses a random
realization per call, e.g. time of the day.
Use any other positive value to seed with a repeatable random
sequence.
-1 is a special value to disable the random number generator noise
(for example if an input spectrum should not be polluted with random noise).
Default: 0.
**contin**: float
Continuum level level added to the noise spectra. You can only add a continuum
level when noise is added as well, i.e. when seed >= 0.
Default: 0.0.
**freq**: float
The central frequency of the band in GHz.
Default: 115.2712018.
**delta**: float
The size of each channel in MHz.
Default: 0.5.
**lines**: list of tuples
Parameters for each Gaussian line. Intensity (SNR), center frequency in GHz, FHWM in km/s.
Examples:
[(15.0, 110.201, 22.0)]
Produce a single Gaussian centered at 110.201 GHz that is 15.0 sigma tall with a FWHM of
22.0 km/s.
[(12.0, 109.98, 15.3), (6.0, 110.0, 15.0)]
Produce two Gaussians, one centered at 109.98 GHz with a peak of 12.0 sigma and FWHM of 15.3
            km/s, and a second centered at 110.0 GHz with an intensity of 6.0 sigma and FWHM of 15.0 km/s.
Default: [].
**transitions**: list
List of any transitions to be included in the spectrum. Each entry should be a list
containing the molecule name, frequency range (in GHz), intensity(SNR), FWHM in km/s and
offset in km/s.
Any tranitions from the given molecule(s) and frequency range will be
included. Example of entry:
[("13COv=0", [110.15, 110.25], 6.0, 30.0, 5.0),
("CH3CNv=0", [110.2, 110.5], 4.5, 10.0, 0.0)]
This will produce a single 13CO line with a peak intensity of 6 sigma, FWHM of 30.0 km/s
and centered at a offset velocity of 5.0 km/s; and a set of 6 of CH3CN lines (with hyperfine
components), with the highest line strength transition peaking at 4.5 sigma, and the rest
proportionally weaker based on line strength, all with a FWHM of 10.0 km/s and no offset.
Molecules can be given multiple times for the purpose of having multiple velocity components.
Default: [].
**hanning**: bool
If True then do a final (1/4,1/2,1/4) hanning smooth over 3 channels.
Default: False.
**Input BDPs**
None
**Output BDPs**
**CubeSpectrum_BDP**: count: 1
Spectrum through the cube. Stored as a single multi-plane table if nspectra > 1.
Output BDP name takes from the input Image by replacing the extension with "csp".
See also :ref:`CubeSpectrum-bdp-api`.
Parameters
----------
keyval : dictionary, optional
Keyword values.
Attributes
----------
_version : string
Version string.
"""
def __init__(self,**keyval):
keys = {"file" : "",
"nchan" : 1000,
"nspectra" : 1,
"seed" : 0, # -1 is special for no noise
"contin" : 0.0,
"freq" : 115.2712018,
"delta" : 0.5, # channel width in MHz
"lines" : [], # [(snr,freq0,fwhm),...]
"transitions" : [],
"hanning" : False,
}
AT.__init__(self,keys,keyval)
self._version = "1.0.0"
self.set_bdp_in([])
self.set_bdp_out([(CubeSpectrum_BDP,1)])
self.spec_description = [] # for summary()
def summary(self):
"""Returns the summary dictionary from the AT, for merging
into the ADMIT Summary object.
GenerateSpectrum_AT adds the following to ADMIT summary:
.. table::
:class: borderless
+---------+----------+---------------------------+
| Key | type | Description |
+=========+==========+===========================+
| spectra | list | the spectral plots |
+---------+----------+---------------------------+
Parameters
----------
None
Returns
-------
dict
Dictionary of SummaryEntry
"""
if hasattr(self,"_summary"):
return self._summary
else:
return {}
def run(self):
"""Runs the task.
Parameters
----------
None
Returns
-------
None
"""
self._summary = {}
dt = utils.Dtime("CubeSpectrum")
seed = self.getkey("seed")
if seed <= 0:
np.random.seed()
else:
np.random.seed(seed)
#print "RANDOM.GET_STATE:",np.random.get_state()
contin = self.getkey("contin")
rms = 1.0 # not a user parameter, we do all spectra in S/N space
f0 = self.getkey("freq") # central frequency in band
df = self.getkey("delta") / 1000.0 # channel width (in GHz)
nspectra = self.getkey("nspectra")
taskargs = " contin=%f freq=%f delta=%f nspectra=%f " % (contin,f0,df,nspectra)
spec = range(nspectra)
dt.tag("start")
if self.getkey("file") != "":
print "READING spectrum from",self.getkey("file")
(freq, spec[0]) = getspec(self.getkey("file"))
nchan = len(freq)
print "Spectrum %d chans from %f to %f: min/max = %f %f" % (nchan, freq.min(), freq.max(), spec[0].min(), spec[0].max())
# @todo nspectra>1 not tested
for i in range(1,nspectra):
spec[i] = deepcopy(spec[0])
dt.tag("getspec")
else:
nchan = self.getkey("nchan")
freq = np.arange(nchan, dtype=np.float64)
center = int(nchan/2)
for i in range(nchan):
freq[i] = f0 + (float((i - center)) * df)
for i in range(nspectra):
spec[i] = np.zeros(nchan)
chans = np.arange(nchan)
taskargs += " nchan = %d" % nchan
for i in range(nspectra):
if seed >= 0:
spec[i] += np.random.normal(contin, rms, nchan)
# print "MEAN/STD",spec[i].mean(),spec[i].std()
lines = self.getkey("lines")
sls = SpectralLineSearch(False)
for item in self.getkey("transitions"):
kw = {"include_only_nrao" : True,
"line_strengths": ["ls1", "ls2"],
"energy_levels" : ["el2", "el4"],
"fel" : True,
"species" : item[0]
}
results = sls.search(item[1][0], item[1][1], "off", **kw)
# look at line strengths
if len(results) > 0:
mx = 0.0
indx = -1
for i in range(len(results)):
if results[i].getkey("linestrength") > mx:
indx = i
mx = results[i].getkey("linestrength")
for res in results:
if mx > 0.0:
lines.append([item[2] * res.getkey("linestrength") / mx, res.getkey("frequency") +
utils.veltofreq(item[4], res.getkey("frequency")), item[3]])
else:
lines.append([item[2], res.getkey("frequency") + utils.veltofreq(item[4],
res.getkey("frequency")), item[3]])
for item in lines:
for i in range(nspectra):
spec[i] += utils.gaussian1D(freq, item[0], item[1], utils.veltofreq(item[2], item[1]))
if self.getkey("hanning"):
for i in range(nspectra):
filter = Filter1D.Filter1D(spec[i], "hanning", **{"width" : 3})
spec[i] = filter.run()
dt.tag("hanning")
center = int(nchan/2)
dt.tag("open")
bdp_name = self.mkext("Genspec","csp")
b2 = CubeSpectrum_BDP(bdp_name)
self.addoutput(b2)
images = {} # png's accumulated
for i in range(nspectra):
sd = []
caption = "Generated Spectrum %d" % i
# construct the Table for CubeSpectrum_BDP
# @todo note data needs to be a tuple, later to be column_stack'd
labels = ["channel" ,"frequency" ,"flux" ]
units = ["number" ,"GHz" ,"" ]
data = (chans ,freq ,spec[i] )
# plane 0 : we are allowing a multiplane table, so the first plane is special
if i==0:
table = Table(columns=labels,units=units,data=np.column_stack(data),planes=["0"])
else:
table.addPlane(np.column_stack(data),"%d" % i)
# example plot , one per position for now
x = chans
xlab = 'Channel'
y = [spec[i]]
sd.append(xlab)
myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode, abspath=self.dir())
ylab = 'Flux'
p1 = "%s_%d" % (bdp_name,i)
myplot.plotter(x,y,"",p1,xlab=xlab,ylab=ylab,thumbnail=True)
# Why not use p1 as the key?
ii = images["pos%d" % i] = myplot.getFigure(figno=myplot.figno,relative=True)
thumbname = myplot.getThumbnail(figno=myplot.figno,relative=True)
image = Image(images=images, description="CubeSpectrum")
sd.extend([ii, thumbname, caption])
self.spec_description.append(sd)
self._summary["spectra"] = SummaryEntry(self.spec_description,"GenerateSpectrum_AT",self.id(True), taskargs)
dt.tag("table")
b2.setkey("image",image)
b2.setkey("table",table)
b2.setkey("sigma",rms)
b2.setkey("mean",contin)
dt.tag("done")
dt.end()
# @todo this could go as a very generic routine in utils
#
def getspec(file, xcol=0, ycol=1):
""" read a spectrum/table from column 1,2
returns: (freq,spec)
"""
lines = open(file).readlines()
x = []
y = []
mincol = max(xcol,ycol) + 1
for line in lines:
if line[0] == '#':
continue
w = line.split()
if len(w) < mincol:
continue
x.append(float(w[xcol]))
y.append(float(w[ycol]))
return (np.array(x),np.array(y))
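# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original ADMIT source).  It only
# shows how the keywords documented in the class docstring map onto a task
# instance; in practice the task is added to an ADMIT project/flow, which
# supplies the working directory and BDP bookkeeping needed by run(), so
# run() is not invoked here.  getspec("spectrum.tab") would likewise return
# (freq, flux) arrays read from a two-column ASCII table, matching what the
# file= keyword expects.
if __name__ == "__main__":
    # Two synthetic 500-channel spectra with a repeatable noise realization,
    # a continuum offset of 0.5 sigma and a single 15-sigma Gaussian line at
    # 110.201 GHz with a FWHM of 22 km/s, hanning smoothed at the end.
    at = GenerateSpectrum_AT(nchan=500, nspectra=2, seed=42, contin=0.5,
                             freq=110.25, delta=0.5,
                             lines=[(15.0, 110.201, 22.0)], hanning=True)
    print at.getkey("lines")       # keyword values are retrieved via getkey()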
| 2.453125 | 2 |
lib/flows/general/discovery_test.py | nahidupa/grr | 1 | 7187 | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Tests for Interrogate."""
import socket
from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact_test
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import search
from grr.lib import test_lib
class DiscoveryTestEventListener(flow.EventListener):
"""A test listener to receive new client discoveries."""
well_known_session_id = rdfvalue.SessionID(flow_name="discovery_test")
EVENTS = ["Discovery"]
# For this test we just write the event as a class attribute.
event = None
@flow.EventHandler(auth_required=True)
def ProcessMessage(self, message=None, event=None):
_ = message
DiscoveryTestEventListener.event = event
class TestClientInterrogate(artifact_test.ArtifactTest):
"""Test the interrogate flow."""
def _CheckUsers(self, all_users):
"""Check all user stores."""
summary = self.fd.GetSummary()
self.assertItemsEqual([x.username for x in summary.users], all_users)
users = [x.username for x in self.fd.Get(self.fd.Schema.USER)]
self.assertItemsEqual(users, all_users)
self.assertItemsEqual(self.fd.Get(self.fd.Schema.USERNAMES), all_users)
# Check kb users
kbusers = [x.username for x in
self.fd.Get(self.fd.Schema.KNOWLEDGE_BASE).users]
self.assertItemsEqual(kbusers, all_users)
def _CheckAFF4Object(self, hostname, system, install_date):
self.assertEqual(self.fd.Get(self.fd.Schema.HOSTNAME), hostname)
self.assertEqual(self.fd.Get(self.fd.Schema.SYSTEM), system)
self.assertEqual(self.fd.Get(self.fd.Schema.INSTALL_DATE), install_date)
def _CheckClientInfo(self):
info = self.fd.Get(self.fd.Schema.CLIENT_INFO)
self.assertEqual(info.client_name, config_lib.CONFIG["Client.name"])
self.assertEqual(info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(info.build_time, config_lib.CONFIG["Client.build_time"])
def _CheckGRRConfig(self):
"""Check old and new client config."""
config_info = self.fd.Get(self.fd.Schema.GRR_CONFIGURATION)
self.assertEqual(config_info["Client.control_urls"],
["http://localhost:8001/control"])
self.assertEqual(config_info["Client.poll_min"], 1.0)
def _CheckClientIndex(self, host_pattern):
"""Check that the index has been updated."""
index_fd = aff4.FACTORY.Create(self.fd.Schema.client_index, "AFF4Index",
mode="r", token=self.token)
self.assertEqual(
[self.fd.urn],
[x for x in index_fd.Query([self.fd.Schema.HOSTNAME], host_pattern)])
def _CheckClientKwIndex(self, keywords, expected_count):
# Tests that the client index has expected_count results when
# searched for keywords.
index = aff4.FACTORY.Create(client_index.MAIN_INDEX,
aff4_type="ClientIndex",
mode="rw",
token=self.token)
self.assertEqual(len(index.LookupClients(keywords)),
expected_count)
def _CheckNotificationsCreated(self):
user_fd = aff4.FACTORY.Open("aff4:/users/test", token=self.token)
notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)
self.assertEqual(len(notifications), 1)
notification = notifications[0]
self.assertEqual(notification.subject, rdfvalue.RDFURN(self.client_id))
def _CheckClientSummary(self, osname, version, kernel="3.13.0-39-generic",
release="5"):
summary = self.fd.GetSummary()
self.assertEqual(summary.client_info.client_name,
config_lib.CONFIG["Client.name"])
self.assertEqual(summary.client_info.client_version,
int(config_lib.CONFIG["Client.version_numeric"]))
self.assertEqual(summary.client_info.build_time,
config_lib.CONFIG["Client.build_time"])
self.assertEqual(summary.system_info.system, osname)
self.assertEqual(summary.system_info.node, "test_node")
self.assertEqual(summary.system_info.release, release)
self.assertEqual(summary.system_info.version, version)
self.assertEqual(summary.system_info.machine, "i386")
self.assertEqual(summary.system_info.kernel, kernel)
self.assertEqual(len(summary.interfaces), 1)
self.assertEqual(summary.interfaces[0].mac_address, "123456")
# Check that the client summary was published to the event listener.
self.assertEqual(DiscoveryTestEventListener.event.client_id, self.client_id)
self.assertEqual(
DiscoveryTestEventListener.event.interfaces[0].mac_address,
"123456")
def _CheckNetworkInfo(self):
net_fd = self.fd.OpenMember("network")
interfaces = list(net_fd.Get(net_fd.Schema.INTERFACES))
self.assertEqual(interfaces[0].mac_address, "123456")
self.assertEqual(interfaces[0].addresses[0].human_readable, "192.168.127.12")
self.assertEqual(socket.inet_ntoa(interfaces[0].addresses[0].packed_bytes),
"192.168.127.12")
# Mac addresses should be available as hex for searching
mac_addresses = self.fd.Get(self.fd.Schema.MAC_ADDRESS)
self.assertTrue("123456".encode("hex") in str(mac_addresses))
# Same for IP addresses.
ip_addresses = self.fd.Get(self.fd.Schema.HOST_IPS)
self.assertTrue("192.168.127.12" in str(ip_addresses))
def _CheckVFS(self):
# Check that virtual directories exist for the mount points
fd = aff4.FACTORY.Open(self.client_id.Add("fs/os/mnt/data"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("fs/tsk/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
fd = aff4.FACTORY.Open(self.client_id.Add("devices/dev/sda"),
token=self.token)
# But no directory listing exists yet - we will need to fetch a new one
self.assertEqual(len(list(fd.OpenChildren())), 0)
def _CheckLabelIndex(self):
"""Check that label indexes are updated."""
self.assertEqual(
list(search.SearchClients("label:Label2", token=self.token)),
[self.client_id])
def _CheckWindowsDiskInfo(self):
client = aff4.FACTORY.Open(self.client_id, token=self.token)
volumes = client.Get(client.Schema.VOLUMES)
self.assertEqual(len(volumes), 2)
for result in volumes:
self.assertTrue(isinstance(result, rdfvalue.Volume))
self.assertTrue(result.windows.drive_letter in ["Z:", "C:"])
def _CheckRegistryPathspec(self):
# This tests that we can click refresh on a key in the registry vfs subtree
# even if we haven't downloaded any other key above it in the tree.
fd = aff4.FACTORY.Open(self.client_id.Add("registry").Add(
"HKEY_LOCAL_MACHINE").Add("random/path/bla"), token=self.token)
pathspec = fd.real_pathspec
self.assertEqual(pathspec.pathtype, rdfvalue.PathSpec.PathType.REGISTRY)
self.assertEqual(pathspec.CollapsePath(),
u"/HKEY_LOCAL_MACHINE/random/path/bla")
def _CheckRelease(self, desired_release, desired_version):
# Test for correct Linux release override behaviour.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
release = str(client.Get(client.Schema.OS_RELEASE))
version = str(client.Get(client.Schema.OS_VERSION))
self.assertEqual(release, desired_release)
self.assertEqual(version, desired_version)
def testInterrogateLinuxWithWtmp(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["LinuxWtmp",
"NetgroupConfiguration",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes", [r"^login$"])
self.SetLinuxClient()
client_mock = action_mocks.InterrogatedClient("TransferBuffer", "StatFile",
"Find", "HashBuffer",
"ListDirectory",
"FingerprintFile")
client_mock.InitializeClient()
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Linux", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*test.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Linux", "14.4", release="Ubuntu",
kernel="3.13.0-39-generic")
self._CheckRelease("Ubuntu", "14.4")
# users 1,2,3 from wtmp
# users yagharek, isaac from netgroup
self._CheckUsers(["yagharek", "isaac", "user1", "user2", "user3"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckClientKwIndex(["Linux"], 1)
self._CheckClientKwIndex(["Label2"], 1)
def testInterrogateWindows(self):
"""Test the Interrogate flow."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
client_mock = action_mocks.InterrogatedClient("TransferBuffer", "StatFile",
"Find", "HashBuffer",
"ListDirectory",
"FingerprintFile")
self.SetWindowsClient()
client_mock.InitializeClient(system="Windows", version="6.1.7600",
kernel="6.1.7601")
# Run the flow in the simulated way
for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
token=self.token,
client_id=self.client_id):
pass
self.fd = aff4.FACTORY.Open(self.client_id, token=self.token)
self._CheckAFF4Object("test_node", "Windows", 100 * 1000000)
self._CheckClientInfo()
self._CheckClientIndex(".*Host.*")
self._CheckGRRConfig()
self._CheckNotificationsCreated()
self._CheckClientSummary("Windows", "6.1.7600", kernel="6.1.7601")
# users Bert and Ernie added by the fixture should not be present (USERS
    # overridden by kb)
# jim parsed from registry profile keys
self._CheckUsers(["jim", "kovacs"])
self._CheckNetworkInfo()
self._CheckVFS()
self._CheckLabelIndex()
self._CheckWindowsDiskInfo()
self._CheckRegistryPathspec()
self._CheckClientKwIndex(["Linux"], 0)
self._CheckClientKwIndex(["Windows"], 1)
self._CheckClientKwIndex(["Label2"], 1)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| 1.9375 | 2 |
practices/20210112/GraphicsView.py | liff-engineer/articles | 2 | 7188 | <reponame>liff-engineer/articles
import sys
from PySide2.QtWidgets import QGraphicsView, QGraphicsScene, QApplication
from PySide2.QtCore import *
from PySide2.QtGui import *
class GraphicsView(QGraphicsView):
def __init__(self, parent=None):
super().__init__(parent)
        # canvas (scene) size for the view
self.w = 64000.0
self.h = 32000.0
        # zoom-related settings
self.zoomInFactor = 1.25
self.zoomClamp = True
self.zoom = 10
self.zoomStep = 1
self.zoomRange = [0, 20]
self.setRenderHints(QPainter.Antialiasing | QPainter.HighQualityAntialiasing |
QPainter.TextAntialiasing | QPainter.SmoothPixmapTransform)
self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setDragMode(QGraphicsView.RubberBandDrag)
self.setScene(QGraphicsScene())
self.setSceneRect(-self.w/2, -self.h/2, self.w, self.h)
def zoomImpl(self, bigOrSmall: bool):
zoomOutFactor = 1 / self.zoomInFactor
zoomFactor = zoomOutFactor
if bigOrSmall:
zoomFactor = self.zoomInFactor
self.zoom += self.zoomStep
else:
zoomFactor = zoomOutFactor
self.zoom -= self.zoomStep
clamped = False
if self.zoom < self.zoomRange[0]:
self.zoom, clamped = self.zoomRange[0], True
if self.zoom > self.zoomRange[1]:
self.zoom, clamped = self.zoomRange[1], True
if not clamped or self.zoomClamp is False:
self.scale(zoomFactor, zoomFactor)
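    # Zooming works by multiplying the view transform by zoomInFactor (or its
    # reciprocal) per wheel/key step, while self.zoom counts the steps so the
    # scale stays within zoomRange; with zoomClamp set, steps that would leave
    # the range are ignored.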
def panBeginImpl(self, event):
releaseEvent = QMouseEvent(QEvent.MouseButtonRelease, event.localPos(), event.screenPos(),
Qt.LeftButton, Qt.NoButton, event.modifiers())
super().mouseReleaseEvent(releaseEvent)
self.setDragMode(QGraphicsView.ScrollHandDrag)
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() | Qt.LeftButton, event.modifiers())
super().mousePressEvent(fakeEvent)
def panEndImpl(self, event):
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() & ~Qt.LeftButton, event.modifiers())
super().mouseReleaseEvent(fakeEvent)
self.setDragMode(QGraphicsView.RubberBandDrag)
def keyPressEvent(self, event):
if event.matches(QKeySequence.ZoomIn):
self.zoomImpl(True)
elif event.matches(QKeySequence.ZoomOut):
self.zoomImpl(False)
else:
super().keyPressEvent(event)
def wheelEvent(self, event):
if self.dragMode() == QGraphicsView.ScrollHandDrag:
return
return self.zoomImpl(event.angleDelta().y() > 0)
def mousePressEvent(self, event):
if event.button() == Qt.MiddleButton:
return self.panBeginImpl(event)
super().mousePressEvent(event)
def mouseReleaseEvent(self, event):
if event.button() == Qt.MiddleButton:
return self.panEndImpl(event)
super().mouseReleaseEvent(event)
if __name__ == "__main__":
app = QApplication(sys.argv)
appView = GraphicsView()
appView.scene().addSimpleText('<EMAIL>')
appView.scene().addRect(-200, -150, 400, 300)
appView.show()
sys.exit(app.exec_())
| 2.15625 | 2 |
armstrong/hatband/tests/_utils.py | joncotton/armstrong.hatband | 0 | 7189 | from armstrong.dev.tests.utils import ArmstrongTestCase
import random
def random_range():
# TODO: make sure this can only be generated once
return range(random.randint(1000, 2000))
class HatbandTestCase(ArmstrongTestCase):
pass
class HatbandTestMixin(object):
script_code = """
<script type="text/javascript" src="/static/ckeditor/ckeditor.js"></script>
""".strip()
textarea_code = 'class="ckeditor"></textarea>'
def assertCkEditorPresent(self, response):
self.assertContains(response, self.script_code)
self.assertContains(response, self.textarea_code)
def assertCkEditorNotPresent(self, response):
self.assertNotContains(response, self.script_code)
self.assertNotContains(response, self.textarea_code)
| 2.421875 | 2 |
tests/propositional/test_natural_deduction.py | ariroffe/logics | 12 | 7190 | import unittest
from logics.classes.propositional import Inference, Formula
from logics.classes.propositional.proof_theories import NaturalDeductionStep, NaturalDeductionRule
from logics.utils.parsers import classical_parser
from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system
class TestClassicalNaturalDeductionSystem(unittest.TestCase):
def test_natural_deduction_rule(self):
"""Test overriding of index and len methods in NaturalDeductionRule"""
rule = NaturalDeductionRule([
'(...)',
NaturalDeductionStep(Formula(['→', ['A'], ['B']])),
'(...)',
NaturalDeductionStep(Formula(['B']), 'E→', [0, 1])
])
self.assertEqual(rule.index(NaturalDeductionStep(Formula(['B']), 'E→', [0, 1])), 1)
self.assertEqual(len(rule), 2)
def test_nd_system(self):
"""Test the method that tells if a step is a correct application of a rule"""
# A correct derivation
deriv = classical_parser.parse_derivation(
"""p; premise
(p → q); premise
q; E→; [1, 0]; []
p ∧ q; I∧; [0, 2]; []""",
natural_deduction=True)
# Check is application of the correct rule, and a different rule
self.assertTrue(nd_system.is_correct_application(deriv, 2, nd_system.rules['E→']))
self.assertFalse(nd_system.is_correct_application(deriv, 2, nd_system.rules['E∧2']))
self.assertTrue(nd_system.is_correct_application(deriv, 3, nd_system.rules['I∧']))
self.assertFalse(nd_system.is_correct_application(deriv, 3, nd_system.rules['E→']))
# Check is correct derivation of the correct and an incorrect inference
i = Inference([Formula(['p']), Formula(['→', ['p'], ['q']])],
[Formula(['∧', ['p'], ['q']])])
self.assertTrue(nd_system.is_correct_derivation(deriv, i))
i2 = Inference([Formula(['p']), Formula(['→', ['p'], ['q']])],
[Formula(['∧', ['q'], ['p']])])
self.assertFalse(nd_system.is_correct_derivation(deriv, i2))
# Repeating steps should not alter the outcome (should print a warning)
# deriv2_0 = classical_parser.parse_derivation(
# """p; supposition; []; [0]
# p; repetition; [0, 0]; [0]""",
# natural_deduction=True)
# self.assertTrue(nd_system.is_correct_application(deriv2_0, 1, nd_system.rules['repetition']))
# Test step in the future
deriv2_1 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [1]; [0]""",
natural_deduction=True)
deriv2_2 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [2]; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv2_1, 1, nd_system.rules['repetition']))
self.assertFalse(nd_system.is_correct_application(deriv2_2, 1, nd_system.rules['repetition']))
# -------------------------------------------------
# Test incorrect use of suppositions
# Using a step in a closed supposition
deriv3_1 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [0]; [0]
(p → p); I→; [0, 1]; []
p; E→; [2, 0]; []""",
natural_deduction=True)
# Check correct application of rep and I→
self.assertTrue(nd_system.is_correct_application(deriv3_1, 1, nd_system.rules['repetition']))
self.assertTrue(nd_system.is_correct_application(deriv3_1, 2, nd_system.rules['I→']))
self.assertFalse(nd_system.is_correct_application(deriv3_1, 3, nd_system.rules['E→']))
# Closing a supposition with a rule that does not close
deriv3_2 = classical_parser.parse_derivation('''
p; premise
p; supposition; []; [1]
p; repetition; [0]; [1]
(p ∨ q); I∨1; [0]; []''',
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_2, 3, nd_system.rules['I∨1']))
# Closing two suppositions at once
deriv3_3 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; supposition; [0]; [0, 1]
(p → p); I→; [0, 1]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_3, 2, nd_system.rules['I→']))
# Not closing a supposition with a rule that does close
deriv3_4 = classical_parser.parse_derivation(
"""p; supposition; []; [0]
p; repetition; [0]; [0]
(p → p); I→; [0, 1]; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_application(deriv3_4, 2, nd_system.rules['I→']))
# Incorrect opening of suppositions
deriv3_5 = classical_parser.parse_derivation(
"""p; supposition; []; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv3_5, None))
deriv3_6 = classical_parser.parse_derivation(
"""p; premise; []; []
q; supposition; []; [0]""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv3_6, None))
# -------------------------------------------------
# A correct derivation using all the rules
deriv4 = classical_parser.parse_derivation(
"""q; premise; []; []
~q; supposition; []; [1]
~q; repetition; [1]; [1]
(q ∧ ~q); I∧; [0, 2]; [1]
q; E∧1; [3]; [1]
⊥; E~; [1, 4]; [1]
p; EFSQ; [5]; [1]
⊥; repetition; [5]; [1]
~~q; I~; [1, 7]; []
q; ~~; [8]; []
q; supposition; []; [10]
q; repetition; [10]; [10]
(q → q); I→; [10, 11]; []
q; E→; [12, 9]; []
(q ∨ p); I∨1; [13]; []
(p → q); premise; []; []
q; E∨; [14, 12, 15]; []
""", natural_deduction=True)
i3 = Inference([Formula(['q']), Formula(['→', ['p'], ['q']])],
[Formula(['q'])])
self.assertTrue(nd_system.is_correct_derivation(deriv4, i3))
def test_rule_order(self):
# i1 is conjunction introduction
i1 = Inference([Formula(['p']), Formula(['q'])],
[Formula(['∧', ['p'], ['q']])])
# First derivation: standard one
deriv1_1 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(p ∧ q); I∧; [0, 1]; []""",
natural_deduction=True)
self.assertTrue(nd_system.is_correct_derivation(deriv1_1, i1))
# Second derivation: reverse on_steps order
deriv1_2 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(p ∧ q); I∧; [1, 0]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv1_2, i1))
i2 = Inference([Formula(['p']), Formula(['q'])],
[Formula(['∧', ['q'], ['p']])])
# Third derivation: reverse the conjuncts
deriv2_1 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(q ∧ p); I∧; [1, 0]; []""",
natural_deduction=True)
self.assertTrue(nd_system.is_correct_derivation(deriv2_1, i2))
# Fourth derivation: reverse the conjuncts and the on_steps
deriv2_2 = classical_parser.parse_derivation(
"""p; premise; []; []
q; premise; []; []
(q ∧ p); I∧; [0, 1]; []""",
natural_deduction=True)
self.assertFalse(nd_system.is_correct_derivation(deriv2_2, i2))
if __name__ == '__main__':
unittest.main()
| 3.109375 | 3 |
src/trusted/validator_arm/dgen_decoder_output.py | cohortfsllc/cohort-cocl2-sandbox | 2,151 | 7191 | <filename>src/trusted/validator_arm/dgen_decoder_output.py
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the decoder based on parsed
table representations.
"""
import dgen_opt
import dgen_output
import dgen_actuals
# This file generates the class decoder Decoder as defined by the
# decoder tables. The code is specifically written to minimize the
# number of decoder classes needed to parse valid ARM
# instructions. Many rows in the table use the same decoder class. In
# addition, we optimize tables by merging, so long as the same decoder
# class is built.
#
# The following files are generated:
#
# decoder.h
# decoder.cc
#
# decoder.h declares the generated decoder parser class while
# decoder.cc contains the implementation of that decoder class.
#
# For testing purposes (see dgen_test_output.py) different rules are
# applied. Note: It may be worth reading dgen_test_output.py preamble
# to get a better understanding of decoder actions, and why we need
# the "action_filter" methods.
"""The current command line arguments to use"""
_cl_args = {}
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
# Defines the header for decoder.h
H_HEADER="""%(FILE_HEADER)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_actuals.h"
namespace nacl_arm_dec {
"""
DECODER_DECLARE_HEADER="""
// Defines a decoder class selector for instructions.
class %(decoder_name)s : DecoderState {
public:
explicit %(decoder_name)s();
// Parses the given instruction, returning the decoder to use.
virtual const ClassDecoder& decode(const Instruction) const;
// Returns the class decoder to use to process the fictitious instruction
// that is inserted before the first instruction in the code block by
// the validator.
const ClassDecoder &fictitious_decoder() const {
return %(fictitious_decoder)s_instance_;
}
private:
"""
DECODER_DECLARE_METHOD_COMMENTS="""
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction.
"""
DECODER_DECLARE_METHOD="""
inline const ClassDecoder& decode_%(table_name)s(
const Instruction inst) const;
"""
DECODER_DECLARE_FIELD_COMMENTS="""
// The following fields define the set of class decoders
// that can be returned by the API function "decode". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need to only be built once (and reused
// for each call to "decode")."""
DECODER_DECLARE_FIELD="""
const %(decoder)s %(decoder)s_instance_;"""
DECODER_DECLARE_FOOTER="""
};
"""
H_FOOTER="""
} // namespace nacl_arm_dec
#endif // %(IFDEF_NAME)s
"""
def generate_h(decoder, decoder_name, filename, out, cl_args):
"""Entry point to the decoder for .h file.
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
named_decoders: If true, generate a decoder state with named
instances.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.h')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'IFDEF_NAME': dgen_output.ifdef_name(filename),
'FILENAME_BASE': filename[:-len('.h')],
'decoder_name': decoder_name,
}
out.write(H_HEADER % values)
values['fictitious_decoder'] = (
decoder.get_value('FictitiousFirst').actual())
out.write(DECODER_DECLARE_HEADER % values)
out.write(DECODER_DECLARE_METHOD_COMMENTS)
for table in decoder.tables():
values['table_name'] = table.name
out.write(DECODER_DECLARE_METHOD % values)
out.write(DECODER_DECLARE_FIELD_COMMENTS)
for action in decoder.action_filter(['actual']).decoders():
values['decoder'] = action.actual()
out.write(DECODER_DECLARE_FIELD % values)
out.write(DECODER_DECLARE_FOOTER % values)
out.write(H_FOOTER % values)
# Defines the header for DECODER.h
CC_HEADER="""%(FILE_HEADER)s
#include "%(header_filename)s"
namespace nacl_arm_dec {
"""
CONSTRUCTOR_HEADER="""
%(decoder_name)s::%(decoder_name)s() : DecoderState()"""
CONSTRUCTOR_FIELD_INIT="""
, %(decoder)s_instance_()"""
CONSTRUCTOR_FOOTER="""
{}
"""
METHOD_HEADER="""
// Implementation of table: %(table_name)s.
// Specified by: %(citation)s
const ClassDecoder& %(decoder_name)s::decode_%(table_name)s(
const Instruction inst) const
{"""
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"""
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""
METHOD_DISPATCH_CLASS_DECODER="""
return %(decoder)s_instance_;"""
METHOD_DISPATCH_SUBMETHOD="""
return decode_%(subtable_name)s(inst);"""
METHOD_DISPATCH_CLOSE="""
}
"""
METHOD_FOOTER="""
// Catch any attempt to fall though ...
return %(not_implemented)s_instance_;
}
"""
DECODER_METHOD_HEADER="""
const ClassDecoder& %(decoder_name)s::decode(const Instruction inst) const {"""
DECODER_METHOD_TRACE="""
fprintf(stderr, "Parsing %%08x\\n", inst.Bits());"""
DECODER_METHOD_FOOTER="""
return decode_%(entry_table_name)s(inst);
}
"""
CC_FOOTER="""
} // namespace nacl_arm_dec
"""
def generate_cc(decoder, decoder_name, filename, out, cl_args):
"""Implementation of the decoder in .cc file
Args:
decoder: The decoder defined by the list of Table objects to
process.
decoder_name: The name of the decoder state to build.
filename: The (localized) name for the .h file.
named_decoders: If true, generate a decoder state with named
instances.
out: a COutput object to write to.
cl_args: A dictionary of additional command line arguments.
"""
global _cl_args
assert filename.endswith('.cc')
_cl_args = cl_args
# Before starting, remove all testing information from the parsed
# tables.
decoder = decoder.action_filter(['actual'])
values = {
'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
'header_filename': filename[:-2] + 'h',
'decoder_name': decoder_name,
'entry_table_name': decoder.primary.name,
}
out.write(CC_HEADER % values)
_generate_constructors(decoder, values, out)
_generate_methods(decoder, values, out)
out.write(DECODER_METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(DECODER_METHOD_TRACE % values)
out.write(DECODER_METHOD_FOOTER % values)
out.write(CC_FOOTER % values)
def _generate_constructors(decoder, values, out):
out.write(CONSTRUCTOR_HEADER % values)
for decoder in decoder.action_filter(['actual']).decoders():
values['decoder'] = decoder.actual()
out.write(CONSTRUCTOR_FIELD_INIT % values)
out.write(CONSTRUCTOR_FOOTER % values)
def _generate_methods(decoder, values, out):
global _cl_args
for table in decoder.tables():
# Add the default row as the last in the optimized row, so that
# it is applied if all other rows do not.
opt_rows = sorted(dgen_opt.optimize_rows(table.rows(False)))
if table.default_row:
opt_rows.append(table.default_row)
opt_rows = table.add_column_to_rows(opt_rows)
print ("Table %s: %d rows minimized to %d"
% (table.name, len(table.rows()), len(opt_rows)))
values['table_name'] = table.name
values['citation'] = table.citation
out.write(METHOD_HEADER % values)
if _cl_args.get('trace') == 'True':
out.write(METHOD_HEADER_TRACE % values)
# Add message to stop compilation warnings if this table
# doesn't require subtables to select a class decoder.
if not table.methods():
out.write("\n UNREFERENCED_PARAMETER(inst);")
count = 0
for row in opt_rows:
count = count + 1
# Each row consists of a set of bit patterns defining if the row
# is applicable. Convert this into a sequence of anded C test
# expressions. For example, convert the following pair of bit
# patterns:
#
# xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
#
      # Each instruction is masked to get the bits, and then
# tested against the corresponding expected bits. Hence, the
# above example is converted to:
#
# ((inst & 0x0F000000) != 0x0C000000) &&
# ((inst & 0x0000000F) != 0x00000005)
out.write(METHOD_DISPATCH_BEGIN %
row.patterns[0].to_commented_bool())
for p in row.patterns[1:]:
out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
out.write(METHOD_DISPATCH_END)
if _cl_args.get('trace') == 'True':
out.write(METHOD_DISPATCH_TRACE % count)
if row.action.__class__.__name__ == 'DecoderAction':
values['decoder'] = row.action.actual()
out.write(METHOD_DISPATCH_CLASS_DECODER % values)
elif row.action.__class__.__name__ == 'DecoderMethod':
values['subtable_name'] = row.action.name
out.write(METHOD_DISPATCH_SUBMETHOD % values)
else:
raise Exception('Bad table action: %s' % repr(row.action))
out.write(METHOD_DISPATCH_CLOSE % values)
values['not_implemented'] = decoder.get_value('NotImplemented').actual()
out.write(METHOD_FOOTER % values)
| 2.296875 | 2 |
compose/progress_stream.py | ilinum/compose | 1 | 7192 | from __future__ import absolute_import
from __future__ import unicode_literals
from compose import utils
class StreamOutputError(Exception):
pass
def stream_output(output, stream):
is_terminal = hasattr(stream, 'isatty') and stream.isatty()
stream = utils.get_output_stream(stream)
all_events = []
lines = {}
diff = 0
for event in utils.json_stream(output):
all_events.append(event)
is_progress_event = 'progress' in event or 'progressDetail' in event
if not is_progress_event:
print_output_event(event, stream, is_terminal)
stream.flush()
continue
if not is_terminal:
continue
# if it's a progress event and we have a terminal, then display the progress bars
image_id = event.get('id')
if not image_id:
continue
if image_id not in lines:
lines[image_id] = len(lines)
stream.write("\n")
diff = len(lines) - lines[image_id]
# move cursor up `diff` rows
stream.write("%c[%dA" % (27, diff))
print_output_event(event, stream, is_terminal)
if 'id' in event:
# move cursor back down
stream.write("%c[%dB" % (27, diff))
stream.flush()
return all_events
def print_output_event(event, stream, is_terminal):
if 'errorDetail' in event:
raise StreamOutputError(event['errorDetail']['message'])
terminator = ''
if is_terminal and 'stream' not in event:
# erase current line
stream.write("%c[2K\r" % 27)
terminator = "\r"
elif 'progressDetail' in event:
return
if 'time' in event:
stream.write("[%s] " % event['time'])
if 'id' in event:
stream.write("%s: " % event['id'])
if 'from' in event:
stream.write("(from %s) " % event['from'])
status = event.get('status', '')
if 'progress' in event:
stream.write("%s %s%s" % (status, event['progress'], terminator))
elif 'progressDetail' in event:
detail = event['progressDetail']
total = detail.get('total')
if 'current' in detail and total:
percentage = float(detail['current']) / float(total) * 100
stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
else:
stream.write('%s%s' % (status, terminator))
elif 'stream' in event:
stream.write("%s%s" % (event['stream'], terminator))
else:
stream.write("%s%s\n" % (status, terminator))
def get_digest_from_pull(events):
for event in events:
status = event.get('status')
if not status or 'Digest' not in status:
continue
_, digest = status.split(':', 1)
return digest.strip()
return None
def get_digest_from_push(events):
for event in events:
digest = event.get('aux', {}).get('Digest')
if digest:
return digest
return None
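def _example_stream_pull(image='busybox'):
    """Illustrative sketch only, not used by compose itself: pipe a docker-py
    pull stream through stream_output and report the resulting digest. The
    APIClient construction is an assumption about the caller's environment.
    """
    import sys
    import docker
    client = docker.APIClient()
    events = stream_output(client.pull(image, stream=True), sys.stdout)
    return get_digest_from_pull(events)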
| 2.390625 | 2 |
tests/test_db.py | beloglazov/openstack-neat | 34 | 7193 | # Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import datetime
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
class Db(TestCase):
@qc(1)
def insert_select():
db = db_utils.init_db('sqlite:///:memory:')
db.vms.insert().execute(uuid='test')
assert db.vms.select().execute().first()['uuid'] == 'test'
db.vm_resource_usage.insert().execute(vm_id=1, cpu_mhz=1000)
assert db.vm_resource_usage.select(). \
execute().first()['cpu_mhz'] == 1000
@qc(10)
def select_cpu_mhz_for_vm(
uuid=str_(of='abc123-', min_length=36, max_length=36),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
n=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
for mhz in cpu_mhz:
db.vm_resource_usage.insert().execute(
vm_id=vm_id,
cpu_mhz=mhz)
assert db.select_cpu_mhz_for_vm(uuid, n) == cpu_mhz[-n:]
@qc(10)
def select_last_cpu_mhz_for_vms(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for uuid, data in vms.items():
for value in data:
db.insert_vm_cpu_mhz({uuid: value})
if data:
res[uuid] = data[-1]
assert db.select_last_cpu_mhz_for_vms() == res
@qc(10)
def select_vm_id(
uuid1=str_(of='abc123-', min_length=36, max_length=36),
uuid2=str_(of='abc123-', min_length=36, max_length=36)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid1)
vm_id = result.inserted_primary_key[0]
assert db.select_vm_id(uuid1) == vm_id
assert db.select_vm_id(uuid2) == vm_id + 1
@qc(10)
def insert_vm_cpu_mhz(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(int_(min=1, max=3000),
list_(of=int_(min=1, max=3000),
min_length=0, max_length=10)),
min_length=0, max_length=5
)
):
db = db_utils.init_db('sqlite:///:memory:')
initial_data = []
data_to_submit = {}
final_data = {}
for uuid, data in vms.items():
vm_id = db.select_vm_id(uuid)
data_to_submit[uuid] = data[0]
final_data[uuid] = list(data[1])
final_data[uuid].append(data[0])
for cpu_mhz in data[1]:
initial_data.append({'vm_id': vm_id,
'cpu_mhz': cpu_mhz})
if initial_data:
db.vm_resource_usage.insert().execute(initial_data)
db.insert_vm_cpu_mhz(data_to_submit)
for uuid, data in final_data.items():
assert db.select_cpu_mhz_for_vm(uuid, 11) == data
@qc(1)
def update_host():
db = db_utils.init_db('sqlite:///:memory:')
db.update_host('host1', 3000, 4, 4000)
hosts = db.hosts.select().execute().fetchall()
assert len(hosts) == 1
host = hosts[0]
assert host['hostname'] == 'host1'
assert host['cpu_mhz'] == 3000
assert host['cpu_cores'] == 4
assert host['ram'] == 4000
db.update_host('host1', 3500, 8, 8000L)
hosts = db.hosts.select().execute().fetchall()
assert len(hosts) == 1
host = hosts[0]
assert host['hostname'] == 'host1'
assert host['cpu_mhz'] == 3500
assert host['cpu_cores'] == 8
assert host['ram'] == 8000L
@qc(10)
def select_cpu_mhz_for_host(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
n=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
host_id = db.update_host(hostname, 1, 1, 1)
for mhz in cpu_mhz:
db.host_resource_usage.insert().execute(
host_id=host_id,
cpu_mhz=mhz)
assert db.select_cpu_mhz_for_host(hostname, n) == cpu_mhz[-n:]
@qc(10)
def select_last_cpu_mhz_for_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=5, max_length=10),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for hostname, data in hosts.items():
db.update_host(hostname, 1, 1, 1)
for value in data:
db.insert_host_cpu_mhz(hostname, value)
if data:
res[hostname] = data[-1]
else:
res[hostname] = 0
assert db.select_last_cpu_mhz_for_hosts() == res
@qc(10)
def insert_host_cpu_mhz(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=1, max_length=10)
):
db = db_utils.init_db('sqlite:///:memory:')
db.update_host(hostname, 1, 1, 1)
for value in cpu_mhz:
db.insert_host_cpu_mhz(hostname, value)
assert db.select_cpu_mhz_for_host(hostname, len(cpu_mhz)) == cpu_mhz
@qc(1)
def select_host_characteristics():
db = db_utils.init_db('sqlite:///:memory:')
assert db.select_host_characteristics() == ({}, {}, {})
db.update_host('host1', 3000, 4, 4000)
db.update_host('host2', 3500, 8, 8000)
assert db.select_host_characteristics() == \
({'host1': 3000, 'host2': 3500},
{'host1': 4, 'host2': 8},
{'host1': 4000, 'host2': 8000})
@qc(1)
def select_host_id():
db = db_utils.init_db('sqlite:///:memory:')
host1_id = db.hosts.insert().execute(
hostname='host1',
cpu_mhz=1,
cpu_cores=1,
ram=1).inserted_primary_key[0]
host2_id = db.hosts.insert().execute(
hostname='host2',
cpu_mhz=1,
cpu_cores=1,
ram=1).inserted_primary_key[0]
assert db.select_host_id('host1') == host1_id
assert db.select_host_id('host2') == host2_id
@qc(1)
def select_host_ids():
db = db_utils.init_db('sqlite:///:memory:')
assert db.select_host_ids() == {}
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
assert db.select_host_ids() == hosts
@qc(1)
def cleanup_vm_resource_usage(
uuid=str_(of='abc123-', min_length=36, max_length=36)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
time = datetime.datetime.today()
for i in range(10):
db.vm_resource_usage.insert().execute(
                vm_id=vm_id,
cpu_mhz=i,
timestamp=time.replace(second=i))
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(10)
db.cleanup_vm_resource_usage(time.replace(second=5))
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(5, 10)
@qc(1)
def cleanup_host_resource_usage(
hostname=str_(of='abc123', min_length=5, max_length=10)
):
db = db_utils.init_db('sqlite:///:memory:')
host_id = db.update_host(hostname, 1, 1, 1)
time = datetime.datetime.today()
for i in range(10):
db.host_resource_usage.insert().execute(
                host_id=host_id,
cpu_mhz=i,
timestamp=time.replace(second=i))
assert db.select_cpu_mhz_for_host(hostname, 100) == range(10)
db.cleanup_host_resource_usage(time.replace(second=5))
assert db.select_cpu_mhz_for_host(hostname, 100) == range(5, 10)
def test_insert_host_states(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_states({'host1': 0, 'host2': 1})
db.insert_host_states({'host1': 0, 'host2': 0})
db.insert_host_states({'host1': 1, 'host2': 1})
result = db.host_states.select().execute().fetchall()
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [0, 0, 1])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [1, 0, 1])
@qc(10)
def select_host_states(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data:
res[host] = data[-1]
else:
res[host] = 1
assert db.select_host_states() == res
@qc(10)
def select_active_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = []
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data and data[-1] == 1 or not data:
res.append(host)
assert set(db.select_active_hosts()) == set(res)
@qc(10)
def select_inactive_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
hosts = {'1ab': [0], '3222': [0, 0, 1, 1, 1, 1, 0, 0], 'b222b': [0, 0, 1, 1, 1, 0, 1]}
db = db_utils.init_db('sqlite:///:memory:')
res = []
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data and data[-1] == 0:
res.append(host)
assert set(db.select_inactive_hosts()) == set(res)
def test_insert_host_overload(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_overload('host2', False)
db.insert_host_overload('host1', True)
db.insert_host_overload('host1', False)
db.insert_host_overload('host2', True)
result = db.host_overload.select().execute().fetchall()
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [1, 0])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [0, 1])
@qc(1)
def insert_select():
db = db_utils.init_db('sqlite:///:memory:')
db.vms.insert().execute(uuid='x' * 36).inserted_primary_key[0]
vm_id = db.vms.insert().execute(uuid='vm' * 18).inserted_primary_key[0]
host_id = db.update_host('host', 1, 1, 1)
db.insert_vm_migration('vm' * 18, 'host')
result = db.vm_migrations.select().execute().first()
assert result[1] == vm_id
assert result[2] == host_id
| 2.0625 | 2 |
libs/BIDS.py | GuillermoPerez32/EE2BIDS_backend | 0 | 7194 | import os
from bids_validator import BIDSValidator
def validate(bids_directory):
print('- Validate: init started.')
file_paths = []
result = []
validator = BIDSValidator()
for path, dirs, files in os.walk(bids_directory):
for filename in files:
if filename == '.bidsignore':
continue
if filename.endswith('_annotations.tsv'):
continue
if filename.endswith('_annotations.json'):
continue
temp = os.path.join(path, filename)
file_paths.append(temp[len(bids_directory):len(temp)])
result.append(validator.is_bids(temp[len(bids_directory):len(temp)]))
# print(validator.is_bids(temp[len(bids_directory):len(temp)]))
return file_paths, result
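# Illustrative sketch: run the validator over a local BIDS directory. The path
# below is a made-up placeholder.
if __name__ == '__main__':
    paths, flags = validate('/data/my_bids_dataset')
    for rel_path, ok in zip(paths, flags):
        print('%s -> %s' % (rel_path, 'valid' if ok else 'INVALID'))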
| 2.78125 | 3 |
src/python/triangula/chassis.py | peterbrazil/brazil | 0 | 7195 | from math import cos, sin, degrees, radians, pi
from time import time
from euclid import Vector2, Point2
from numpy import array as np_array
from numpy.linalg import solve as np_solve
__author__ = 'tom'
def test():
chassis = HoloChassis(wheels=[
HoloChassis.OmniWheel(position=Point2(1, 0), angle=0, radius=60),
HoloChassis.OmniWheel(position=Point2(-1, 0), angle=0, radius=60)]
)
print chassis.get_wheel_speeds(Motion(translation=Vector2(0, 0), rotation=0.5))
print chassis.get_wheel_speeds(Motion(translation=Vector2(0, 0), rotation=0.5), origin=Point2(1, 0))
def rotate_point(point, angle, origin=None):
"""
Rotate a Point2 around another Point2
:param euclid.Point2 point:
The point to rotate
:param float angle:
Angle in radians, clockwise rotation
:param euclid.Point2 origin:
Origin of the rotation, defaults to (0,0) if not specified
:return:
A new :class:`euclid.Point2` containing the rotated input point
"""
if origin is None:
origin = Point2(0, 0)
s = sin(-angle)
c = cos(-angle)
return Point2(c * (point.x - origin.x) - s * (point.y - origin.y) + origin.x,
s * (point.x - origin.x) + c * (point.y - origin.y) + origin.y)
def rotate_vector(vector, angle, origin=None):
"""
Rotate a :class:`euclid.Vector2` around a :class:`euclid.Point2`
:param euclid.Vector2 vector:
The vector to rotate
:param float angle:
Angle in radians, clockwise rotation
:param euclid.Point2 origin:
Origin of the rotation, defaults to (0,0) if not specified
:return:
A new :class:`euclid.Point2` containing the rotated input point
"""
if origin is None:
origin = Point2(0, 0)
s = sin(-angle)
c = cos(-angle)
return Vector2(c * (vector.x - origin.x) - s * (vector.y - origin.y) + origin.x,
s * (vector.x - origin.x) + c * (vector.y - origin.y) + origin.y)
def smallest_difference(a, b, max_value=2 * pi):
"""
Given two floats, a and b, and a maximum possible value for both a and b, calculate the smallest delta from a to b.
For example, if a=1.0, b=2.5 and max_value=2.6, this should return -1.1, as subtracting 1.1 from a would result in
-0.1, which will then be transformed to 2.5 after taking its modulus with 2.6. If max_value was 10, it would return
+1.5, as this is the lower magnitude delta needed to go from 1.0 to 2.5. This function is used when calculating the
shortest delta between two pose orientations, for this reason the max_value defaults to 2*pi for use when working
in radians.
If either a or b are less than zero or greater than the maximum value they will be treated as a % max_value or b %
max_value respectively for the purposes of this calculation.
:param float a:
First value (see above)
:param b:
Second value (see above)
:param max_value:
Modulus, defaults to 2*pi if not specified
:return:
A value d such that (a + d) % max_value == b, and abs(d) is minimal (as there would be an infinite number of
possible d that satisfy this relationship).
"""
mod_a = a % max_value
mod_b = b % max_value
if abs(mod_a - mod_b) <= max_value / 2:
return mod_b - mod_a
elif mod_a >= mod_b:
return mod_b + (max_value - mod_a)
else:
return -(mod_a + (max_value - mod_b))
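def _smallest_difference_examples():
    """
    Illustrative examples of smallest_difference, mirroring the worked cases in
    the docstring above; kept as a reference helper rather than called anywhere.
    """
    assert abs(smallest_difference(1.0, 2.5, max_value=2.6) - (-1.1)) < 1e-9
    assert abs(smallest_difference(1.0, 2.5, max_value=10) - 1.5) < 1e-9
    # With the default modulus of 2*pi: 350 degrees to 10 degrees is +20 degrees.
    assert abs(smallest_difference(radians(350), radians(10)) - radians(20)) < 1e-9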
def get_regular_triangular_chassis(wheel_distance, wheel_radius, max_rotations_per_second):
"""
Build a HoloChassis object with three wheels, each identical in size and maximum speed. Each wheel is positioned
at the corner of a regular triangle, and with direction perpendicular to the normal vector at that corner.
:param wheel_distance:
Distance in millimetres between the contact points of each pair of wheels (i.e. the length of each edge of the
regular triangle)
:param wheel_radius:
Wheel radius in millimetres
:param max_rotations_per_second:
Maximum wheel speed in revolutions per second
:return:
An appropriately configured HoloChassis
"""
point = Point2(0, cos(radians(30)) * wheel_distance / 2.0)
vector = Vector2(-2 * pi * wheel_radius, 0)
# Pink
wheel_a = HoloChassis.OmniWheel(
position=point,
vector=vector,
max_speed=max_rotations_per_second)
# Yellow
wheel_b = HoloChassis.OmniWheel(
position=rotate_point(point, pi * 2 / 3),
vector=rotate_vector(vector, pi * 2 / 3),
max_speed=max_rotations_per_second)
# Green
wheel_c = HoloChassis.OmniWheel(
position=rotate_point(point, pi * 4 / 3),
vector=rotate_vector(vector, pi * 4 / 3),
max_speed=max_rotations_per_second)
return HoloChassis(wheels=[wheel_a, wheel_b, wheel_c])
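def _example_regular_chassis_speeds():
    """
    Illustrative sketch: build the regular three-wheel chassis and ask for the
    wheel speeds giving a forward translation with a slow clockwise spin. The
    dimensions are placeholder values rather than measurements of a real robot.
    """
    chassis = get_regular_triangular_chassis(
        wheel_distance=290.0,
        wheel_radius=30.0,
        max_rotations_per_second=1.0)
    return chassis.get_wheel_speeds(
        Motion(translation=Vector2(0, 100.0), rotation=0.1))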
class WheelSpeeds:
"""
A simple container to hold desired wheel speeds, and to indicate whether any speeds were scaled back due to
impossibly high values.
"""
def __init__(self, speeds, scaling):
"""
Create a new wheel speeds container
:param speeds:
A sequence of float values, one per wheel, in revolutions per second
:param float scaling:
            If a requested translation or rotation was too fast for the chassis to perform, it will return an instance
            of this class with the scaling set to a value less than 1.0. This indicates that it was unable to
provide the requested trajectory but has instead provided the highest magnitude one possible. This parameter
then contains the proportion of the requested trajectory that was possible to provide. For example, if
the motion requested was a translation of 10mm/s in the X axis and a rotation of 10 radians per second, but
on calculation this resulted in excessive wheel speeds which weren't possible, it might be scaled back to
6mm/s on X and 6 radians per second - the motion is proportionately the same just slower, and in this case
the scaling value would be 0.6.
"""
self.speeds = speeds
self.scaling = scaling
def __str__(self):
return 'WheelSpeeds[ speeds={}, scaling={} ]'.format(self.speeds, self.scaling)
class Motion:
"""
A container to hold the translation and rotation vector representing the robot's motion. This is always expressed
in the robot's coordinate frame, so a translation component of 0,1 always means the robot is heading forwards,
irrespective of the current orientation of the robot (i.e. if the robot was turned 90 degrees in world space this
0,1 motion would be a movement along the X axis in world space, but the Y axis in robot space). The rotation
component of the motion is expressed in radians per second, positive values corresponding to clockwise rotation
when viewed from the direction relative to the plane such that X is positive to the right and Y positive upwards.
"""
def __init__(self, translation=None, rotation=0):
"""
Constructor
:param euclid.Vector2 translation:
Vector2 representing the translation component in robot coordinate space of the motion. Defaults to
Vector2(0,0)
:param float rotation:
Rotation in radians per second. Defaults to 0.
"""
if translation is not None:
self.translation = translation
else:
self.translation = Vector2(0, 0)
self.rotation = rotation
def __str__(self):
return 'Motion[ x={}, y={}, theta={} (deg={}) ]'.format(self.translation.x, self.translation.y, self.rotation,
degrees(self.rotation))
class DeadReckoning:
"""
Encapsulates the logic required to track the robot's position in world space using wheel encoders and chassis
kinematics. To update the state of this object you need to call the update_from_counts function - this will
compute the difference in counts for each wheel, and from this derive the rotational speed for each wheel since
the last measurement. The :class:`triangula.chassis.HoloChassis` is then used to convert these speeds into an arc,
with the assumption that wheel speeds were constant during the time interval. This arc is used to update the
:class:`triangula.chassis.Pose` representing the current best estimate of the robot's position.
Because this is in effect integrating over sensor readings, any errors, particularly in the chassis geometry or
dimensions, or in the number of counts per revolution (for example if the gearing isn't quite what you think it is
or there's enough slop in the gearbox that readings can drift) will accumulate over time. To mitigate this, if you
have precise instantaneous information such as a compass reading every few seconds, these readings can be used to
explicitly set the position, orientation, or both of the :class:`triangula.chassis.Pose` tracked by this class.
As there's an implicit assumption that wheel speeds are constant between encoder readings, this class will yield
more accurate results when updated frequently. The exact optimal update frequency will depend on the encoder
resolutions, chassis geometry etc. Some manual tuning may be required.
"""
def __init__(self, chassis, counts_per_revolution=64 * 19, max_count_value=1 << 15):
"""
Constructor
:param triangula.chassis.HoloChassis chassis:
The :class:`triangula.chassis.HoloChassis` to be used to define kinematics for this DeadReckoning
:param float counts_per_revolution:
The number of counts registered by the wheel encoders per revolution of the wheel. Defaults to 64*19 to
be the 64 count encoder fitted to a 19:1 reduction gearbox.
:param int max_count_value:
            The largest value read from the encoders, this is used to determine when we've wrapped around the zero
            point. Defaults to 1<<15; use 1<<16 if the count values are held in the microcontroller module as a
            full uint16_t
"""
self.chassis = chassis
self.counts_per_revolution = counts_per_revolution
self.max_count_value = max_count_value
self.last_encoder_values = None
self.last_reading_time = None
self.pose = None
def reset(self):
"""
Clear the state of this :class:`triangula.chassis.DeadReckoning`
"""
self.last_encoder_values = None
self.last_reading_time = None
self.pose = None
def set_position(self, position):
"""
Explicitly set the position of the robot in world coordinates. Overrides the current value tracked by this
instance. Use this when you have better information and want to update the state accordingly.
:param euclid.Point2 position:
The new position to set, as a :class:`euclid.Point2`, coordinates are in mm
"""
self.pose.position = position
return self.pose
def set_orientation(self, orientation):
"""
Explicitly set the orientation of the robot in world coordinates. Use this to explicitly update the orientation,
for example when you have a sufficiently accurate compass fix that it can be used to eliminate any accumulated
errors built up by the dead reckoning algorithm.
:param float orientation:
The new orientation to set, in radians from the positive Y axis, clockwise rotations being positive. This
value will be normalised to the range 0-2PI
:return:
The current (updated) value of the :class:`triangula.chassis.Pose`
"""
self.pose.orientation = orientation % (2 * pi)
return self.pose
def update_from_counts(self, counts):
"""
Update the pose from a new set of encoder values
:param counts:
A list of encoder counts, one per wheel
:return:
The updated :class:`triangula.chassis.Pose` object (this is also modified in the internal state of the
DeadReckoning)
"""
reading_time = time()
if self.last_encoder_values is None:
self.last_encoder_values = counts
self.last_reading_time = reading_time
self.pose = Pose(Point2(0, 0), 0)
else:
time_delta = reading_time - self.last_reading_time
wheel_speeds = [smallest_difference(current_reading, last_reading, self.max_count_value) / (
self.counts_per_revolution * time_delta) for last_reading, current_reading
in zip(counts, self.last_encoder_values)]
motion = self.chassis.calculate_motion(speeds=wheel_speeds)
self.pose = self.pose.calculate_pose_change(motion, time_delta)
self.last_encoder_values = counts
self.last_reading_time = reading_time
return self.pose
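def _example_dead_reckoning_loop(chassis, read_encoder_counts, iterations=100):
    """
    Illustrative sketch: a minimal dead-reckoning loop. read_encoder_counts is a
    hypothetical callable returning the current per-wheel encoder counts; swap
    in whatever your hardware interface actually provides.
    """
    reckoning = DeadReckoning(chassis)
    pose = None
    for _ in range(iterations):
        pose = reckoning.update_from_counts(read_encoder_counts())
    return pose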
class Pose:
"""
A container to hold the position as a Point2 along with orientation in radians, where 0 corresponds to the positive
Y axis (0,1). Orientation is expressed in radians, with positive values indicating a rotation from the positive Y
axis in the clockwise direction, i.e. a rotation of 0 is North, pi/2 East, pi South and 3pi/2 West.
"""
def __init__(self, position=None, orientation=0):
"""
Constructor
:param euclid.Point2 position:
A Point2 containing the position of the centre of the robot. Defaults to Point2(0,0)
:param float orientation:
Orientation in radians, 0 being the positive Y axis, positive values correspond to clockwise rotations, i.e.
pi/4 is East. This value will be normalised to be between 0 and 2 * pi. Defaults to 0
"""
if position is not None:
self.position = position
else:
self.position = Point2(0, 0)
self.orientation = orientation % (2 * pi)
def distance_to_pose(self, to_pose):
"""
Return the distance to the other pose position
:param triangula.chassis.Pose to_pose:
The target pose
"""
return abs(self.position - to_pose.position)
def is_close_to(self, to_pose, max_distance=0.001, max_orientation_difference=radians(1)):
"""
Check whether we're close to the specified pose, defining closeness as both distance on the plane and difference
in orientation.
:param to_pose:
The target pose
:param max_distance:
Maximum distance within which we'll count as being close, defaults to 0.001
:param max_orientation_difference:
Maximum number of radians we can be off the target pose's orientation to count as close, defaults to 1
degree (calculated with ``radians(1)``)
:return:
True if this pose is regarded as close to the other, False otherwise
"""
if self.distance_to_pose(to_pose) > max_distance:
return False
elif abs(smallest_difference(self.orientation, to_pose.orientation)) > max_orientation_difference:
return False
else:
return True
def translate(self, vector):
"""
Create a new pose, with the same orientation as this one and the specified translation applied to its position.
:param euclid.Vector2 vector:
Vector by which the position of this pose should be translated when creating the new Pose
:return:
Returns the new Pose
"""
return Pose(position=self.position + vector, orientation=self.orientation)
def pose_to_pose_vector(self, to_pose):
"""
Calculates the Vector2, in robot coordinate space (remember that Pose objects use world coordinates!) that
represents the translation required to move from this Pose to the specified target Pose.
:param triangula.chassis.Pose to_pose:
A target :class:`triangula.chassis.Pose`, the resultant vector in robot space will translate the robot to
the position contained in this pose. Note that this does not take any account of the orientation component
of the to_pose, only the starting one.
:return:
A :class:`euclid.Vector2` containing the translation part, in robot space, of the motion required to move
from this Pose to the target.
"""
return rotate_vector(
vector=Vector2(to_pose.position.x - self.position.x, to_pose.position.y - self.position.y),
angle=-self.orientation)
def pose_to_pose_motion(self, to_pose, time_seconds):
"""
Calculates a Motion which should be applied to the current Pose to move the robot towards the target, such that
        it should hit the target no sooner than time_seconds into the future. This function should be called again on
        each Pose update, e.g. from a dead reckoning module, as it doesn't do any course planning (it would, for example,
        be possible to calculate a single constant motion to move in an arc to the target Pose, but this would be rather
        inefficient; it is better to incrementally home in on the target by repeatedly calling this function). To move as
fast as possible to the target, set the time to something implausibly small, then use the chassis functions
to limit the resultant motion to the range possible for the chassis. This would require some kind of motion
limit to avoid skidding and messing up the Pose calculation logic.
:param to_pose:
A target :class:`triangula.chassis.Pose`
:param time_seconds:
            The minimum number of seconds to transition to the target pose.
:return:
A :class:`triangula.chassis.Motion` containing the motion required to attain the target pose in the
specified time. This is highly likely to be impossible, in which case using the chassis functions to
determine the wheel power and extract the scaling factor will give the actual time (ignoring acceleration
limits) to transition to the target.
"""
translation = self.pose_to_pose_vector(to_pose=to_pose)
rotation = smallest_difference(self.orientation, to_pose.orientation)
return Motion(translation=translation / time_seconds, rotation=rotation / time_seconds)
def calculate_pose_change(self, motion, time_delta):
"""
Given this as the starting Pose, a Motion and a time in seconds, calculate the resultant Pose at the end of the
time interval.
This makes use of the fact that if you travel in a consistent direction while turning at a constant rate you
will describe an arc. By calculating the centre point of this arc we can simply rotate the starting pose around
this centre point. This is considerably simpler than integrating over the motion 3-vector. A special case is
used to avoid division by zero errors when there is no rotation component to the motion.
:param triangula.chassis.Motion motion:
The motion of the robot, assumed to be constant for the duration of the time interval. The motion is
expressed in the robot's coordinate frame, so a translation of (0,1) is always a forward motion,
irrespective of the current orientation.
:param float time_delta:
The time in seconds during which the specified motion should be applied.
:return:
            A :class:`triangula.chassis.Pose` which represents the resultant pose after applying the supplied motion for the
given time.
"""
# Total delta in orientation angle over the time interval
orientation_delta = motion.rotation * time_delta
# Scaled translation vector rotated into world coordinate space (motion uses robot space)
translation_vector_world = rotate_vector(motion.translation, self.orientation) * time_delta
':type : euclid.Vector2'
if orientation_delta == 0:
            # No rotation, so trivially add the rotated, scaled translation vector to the current pose
return self.translate(translation_vector_world)
else:
centre_of_rotation = self.position + translation_vector_world.cross() / orientation_delta
':type : euclid.Point2'
final_position = rotate_point(self.position, angle=orientation_delta, origin=centre_of_rotation)
return Pose(position=final_position, orientation=self.orientation + orientation_delta)
def __str__(self):
return 'Pose[x={}, y={}, orientation={} (deg={})]'.format(self.position.x, self.position.y, self.orientation,
degrees(self.orientation))
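# Example usage (an illustrative sketch only, kept as comments; it assumes euclid's Point2 / Vector2 and the Motion
# class defined earlier in this module, as used by the methods above):
#
#   start = Pose(position=Point2(0, 0), orientation=0)           # at the origin, facing North
#   motion = Motion(translation=Vector2(0, 100), rotation=pi)    # 100 mm/s forward while turning
#   later = start.calculate_pose_change(motion, time_delta=0.5)  # pose after half a second along the arc
#
# A dead reckoning loop keeps the Pose current by repeatedly applying small time deltas in this way.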
class HoloChassis:
"""
An assembly of wheels at various positions and angles, which can be driven independently to create a holonomic drive
    system. A holonomic system is one where the number of degrees of freedom in the system is equal to the number of
directly controllable degrees of freedom, so for a chassis intended to move in two dimensions the degrees of freedom
are two axes of translation and one of rotation. For a full holonomic system we therefore need at least three wheels
defined.
"""
def __init__(self, wheels):
"""
Create a new chassis, specifying a set of wheels.
:param wheels:
A sequence of :class:`triangula.chassis.HoloChassis.OmniWheel` objects defining the wheels for this chassis.
"""
self.wheels = wheels
self._matrix_coefficients = np_array([[wheel.co_x, wheel.co_y, wheel.co_theta] for wheel in self.wheels])
def calculate_motion(self, speeds):
"""
Invert the motion to speed calculation to obtain the actual linear and angular velocity of the chassis given
a vector of wheel speeds. See http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.linalg.solve.html
:param speeds:
            An array of wheel speeds, expressed as floats with units of revolutions per second, positive being in the
            direction of the wheel vector.
:return:
A :class:`triangula.chassis.Motion` object containing the calculated translation and rotation in the robot's
coordinate space.
"""
motion_array = np_solve(self._matrix_coefficients, np_array(speeds))
return Motion(Vector2(x=float(motion_array[0]),
y=float(motion_array[1])),
rotation=float(motion_array[2]))
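    # The linear solve above is the inverse of the get_wheel_speeds calculation below: each row of
    # self._matrix_coefficients maps a motion 3-vector (x velocity, y velocity, rotation) to one wheel's speed in
    # revolutions per second, so np_solve recovers the Motion from a set of measured wheel speeds.
    # Illustrative round trip (a sketch only, kept as comments; 'chassis' is a hypothetical HoloChassis instance):
    #   speeds = chassis.get_wheel_speeds(Motion(translation=Vector2(100, 0), rotation=0)).speeds
    #   motion = chassis.calculate_motion(speeds)  # recovers the commanded Motion, provided no scaling was applied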
def get_max_translation_speed(self):
"""
Calculate the maximum translation speed, assuming all directions are equivalent and that there is no rotation
component to the motion.
:return:
Maximum speed in millimetres per second as a float
"""
unrealistic_speed = 10000.0
scaling = self.get_wheel_speeds(Motion(translation=Vector2(0, unrealistic_speed), rotation=0)).scaling
return unrealistic_speed * scaling
def get_max_rotation_speed(self):
"""
Calculate the maximum rotation speed around the origin in radians per second, assuming no translation motion
at the same time.
:return:
Maximum radians per second as a float
"""
unrealistic_speed = 2 * pi * 100
scaling = self.get_wheel_speeds(Motion(translation=Vector2(0, 0), rotation=unrealistic_speed)).scaling
return unrealistic_speed * scaling
def get_wheel_speeds(self, motion, origin=Point2(x=0, y=0)):
"""
Calculate speeds to drive each wheel in the chassis at to attain the specified rotation / translation 3-vector.
:param triangula.chassis.Motion motion:
Desired motion of the robot chassis
:param euclid.Point2 origin:
Optional, can define the centre of rotation to be something other than 0,0. Units are in millimetres.
Defaults to rotating around x=0, y=0.
:return:
A :class:`triangula.chassis.WheelSpeeds` containing both the target wheel speeds and the scaling, if any,
which was required to bring those speeds into the allowed range for all wheels. This prevents unexpected
motion in cases where only a single wheel is being asked to turn too fast, in such cases all wheel speeds
will be scaled back such that the highest is within the bounds allowed for that particular wheel. This
can accommodate wheels with different top speeds.
"""
def velocity_at(point):
"""
Compute the velocity as a Vector2 at the specified point given the enclosing translation and rotation values
Method: Normalise the vector from the origin to the point, then take the cross of itself to produce a unit
vector with direction that of a rotation around the origin. Scale this by the distance from the origin and
by the rotation in radians per second, then simply add the translation vector.
:param euclid.Point2 point:
Point at which to calculate velocity
:return:
A :class:`euclid.Vector2` representing the velocity at the specified point in mm/s
"""
d = point - origin
return d.cross() * motion.rotation + motion.translation
wheel_speeds = list(wheel.speed(velocity_at(wheel.position)) for wheel in self.wheels)
scale = 1.0
for speed, wheel in zip(wheel_speeds, self.wheels):
if wheel.max_speed is not None and abs(speed) > wheel.max_speed:
wheel_scale = wheel.max_speed / abs(speed)
scale = min(scale, wheel_scale)
return WheelSpeeds(speeds=list(speed * scale for speed in wheel_speeds), scaling=scale)
class OmniWheel:
"""
Defines a single omni-wheel within a chassis assembly. Omni-wheels are wheels formed from rollers, where the
        motion of the roller is perpendicular to the motion of the primary wheel. This is distinct from a mecanum wheel
        where the rollers are at an angle (normally around 30-40 degrees) to the primary wheel. Omni-wheels must be
        positioned on the chassis with non-parallel unit vectors; mecanum wheels can in some cases be positioned with
all unit vectors parallel.
A wheel has a location relative to the chassis centre and a vector describing the direction of motion of the
wheel when driven with a positive angular velocity. The location is specified in millimetres, and the magnitude
of the wheel vector should be equal to the number of millimetres travelled in a single revolution. This allows
for different sized wheels to be handled within the same chassis.
"""
        def __init__(self, position, max_speed=None, angle=None, radius=None, vector=None):
            """
            Create a new omni-wheel object, specifying the position and either a direction vector directly or the angle
            in radians clockwise from the positive Y axis along with the radius of the wheel.
:param euclid.Point2 position:
The wheel's contact point with the surface, specified relative to the centre of the
chassis. Units are millimetres.
:param float max_speed:
The maximum number of revolutions per second allowed for this wheel. When calculating the wheel speeds
required for a given trajectory this value is used to scale back all motion if any wheel would have to
move at an impossible speed. If not specified this defaults to None, indicating that no speed limit
should be placed on this wheel.
:param angle:
The angle, specified in radians from the positive Y axis where positive values are clockwise from this
axis when viewed from above, of the direction of travel of the wheel when driven with a positive speed.
                If this value is specified then radius must also be specified and vector left as None.
:param radius:
The radius in millimetres of the wheel, measuring from the centre to the contact point with the surface,
this may be hard to determine for some wheels based on their geometry, particularly for wheels with
cylindrical rollers, as the radius will vary. For these cases it may be worth directly measuring the
circumference of the entire assembly and calculating radius rather than measuring directly. This is used
to determine the magnitude of the direction vector. If this is not None then the angle must also be
                specified, and vector left as None.
:param euclid.Vector2 vector:
2 dimensional vector defining the translation of the wheel's contact point after a full
revolution of the wheel.
"""
self.position = position
self.max_speed = max_speed
if angle is None and radius is None and vector is not None:
            # Specify wheel based on direct vector
self.vector = vector
elif angle is not None and radius is not None and vector is None:
            # Specify based on angle from positive Y axis and radius
circumference = 2 * pi * radius
self.vector = Vector2(sin(angle) * circumference, cos(angle) * circumference)
else:
raise ValueError('Must specify exactly one of angle and radius or translation vector')
self.vector_magnitude_squared = self.vector.magnitude_squared()
self.co_x = self.vector.x / self.vector_magnitude_squared
self.co_y = self.vector.y / self.vector_magnitude_squared
self.co_theta = (self.vector.x * self.position.y -
self.vector.y * self.position.x) / self.vector_magnitude_squared
def speed(self, velocity):
"""
Given a velocity at a wheel contact point, calculate the speed in revolutions per second at which the wheel
should be driven.
Method: we want to find the projection of the velocity onto the vector representing the drive of this wheel.
We store the vector representing a single revolution of travel as self.vector, so the projection onto this
would be velocity.dot(self.vector / abs(self.vector)). However, we want revolutions per second, so we must
then divide again by abs(self.vector), leading to
velocity.dot(self.vector / abs(self.vector))/abs(self.vector). Because the definition of the dot product is
the sum of x1*x2, y1*y2, ... any scalar applied to each x, y ... of a single vector can be moved outside
the dot product, so we can simplify as velocity.dot(self.vector) / abs(self.vector)^2. As the magnitude of
the vector is taken by sqrt(x^2+y^2) we can simply express this as (x^2+y^2), held in the convenient
function magnitude_squared(). So our final simplified form is
velocity.dot(self.vector) / self.vector.magnitude_squared(). For efficiency, and because self.vector doesn't
change, we can pre-compute this.
:param euclid.Vector2 velocity:
The velocity at the wheel's contact point with the surface, expressed in mm/s
:return:
Target wheel speed in rotations per second to hit the desired vector at the contact point.
"""
return velocity.dot(self.vector) / self.vector_magnitude_squared
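# Example construction (an illustrative sketch only, kept as comments; the wheel positions, radii and speed limits
# below are made-up values rather than measurements from any particular robot):
#
#   chassis = HoloChassis(wheels=[
#       HoloChassis.OmniWheel(position=Point2(x=150, y=0), angle=0, radius=30, max_speed=10),
#       HoloChassis.OmniWheel(position=Point2(x=-75, y=130), angle=2 * pi / 3, radius=30, max_speed=10),
#       HoloChassis.OmniWheel(position=Point2(x=-75, y=-130), angle=4 * pi / 3, radius=30, max_speed=10)])
#   wheel_speeds = chassis.get_wheel_speeds(Motion(translation=Vector2(0, 100), rotation=0))
#
# wheel_speeds.speeds holds one revolutions-per-second value per wheel, already multiplied by wheel_speeds.scaling
# so that no wheel is asked to exceed its max_speed.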
| 3.421875 | 3 |
libs/PureCloudPlatformClientV2/models/management_unit.py | rocketbot-cl/genesysCloud | 1 | 7196 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ManagementUnit(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ManagementUnit - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'name': 'str',
'division': 'Division',
'business_unit': 'BusinessUnitReference',
'start_day_of_week': 'str',
'time_zone': 'str',
'settings': 'ManagementUnitSettingsResponse',
'metadata': 'WfmVersionedEntityMetadata',
'version': 'int',
'date_modified': 'datetime',
'modified_by': 'UserReference',
'self_uri': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'division': 'division',
'business_unit': 'businessUnit',
'start_day_of_week': 'startDayOfWeek',
'time_zone': 'timeZone',
'settings': 'settings',
'metadata': 'metadata',
'version': 'version',
'date_modified': 'dateModified',
'modified_by': 'modifiedBy',
'self_uri': 'selfUri'
}
self._id = None
self._name = None
self._division = None
self._business_unit = None
self._start_day_of_week = None
self._time_zone = None
self._settings = None
self._metadata = None
self._version = None
self._date_modified = None
self._modified_by = None
self._self_uri = None
@property
def id(self):
"""
Gets the id of this ManagementUnit.
The globally unique identifier for the object.
:return: The id of this ManagementUnit.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ManagementUnit.
The globally unique identifier for the object.
:param id: The id of this ManagementUnit.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this ManagementUnit.
:return: The name of this ManagementUnit.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ManagementUnit.
:param name: The name of this ManagementUnit.
:type: str
"""
self._name = name
@property
def division(self):
"""
Gets the division of this ManagementUnit.
The division to which this entity belongs.
:return: The division of this ManagementUnit.
:rtype: Division
"""
return self._division
@division.setter
def division(self, division):
"""
Sets the division of this ManagementUnit.
The division to which this entity belongs.
:param division: The division of this ManagementUnit.
:type: Division
"""
self._division = division
@property
def business_unit(self):
"""
Gets the business_unit of this ManagementUnit.
The business unit to which this management unit belongs
:return: The business_unit of this ManagementUnit.
:rtype: BusinessUnitReference
"""
return self._business_unit
@business_unit.setter
def business_unit(self, business_unit):
"""
Sets the business_unit of this ManagementUnit.
The business unit to which this management unit belongs
:param business_unit: The business_unit of this ManagementUnit.
:type: BusinessUnitReference
"""
self._business_unit = business_unit
@property
def start_day_of_week(self):
"""
Gets the start_day_of_week of this ManagementUnit.
Start day of week for scheduling and forecasting purposes. Moving to Business Unit
:return: The start_day_of_week of this ManagementUnit.
:rtype: str
"""
return self._start_day_of_week
@start_day_of_week.setter
def start_day_of_week(self, start_day_of_week):
"""
Sets the start_day_of_week of this ManagementUnit.
Start day of week for scheduling and forecasting purposes. Moving to Business Unit
:param start_day_of_week: The start_day_of_week of this ManagementUnit.
:type: str
"""
allowed_values = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
if start_day_of_week.lower() not in map(str.lower, allowed_values):
# print("Invalid value for start_day_of_week -> " + start_day_of_week)
self._start_day_of_week = "outdated_sdk_version"
else:
self._start_day_of_week = start_day_of_week
@property
def time_zone(self):
"""
Gets the time_zone of this ManagementUnit.
The time zone for the management unit in standard Olson format. Moving to Business Unit
:return: The time_zone of this ManagementUnit.
:rtype: str
"""
return self._time_zone
@time_zone.setter
def time_zone(self, time_zone):
"""
Sets the time_zone of this ManagementUnit.
The time zone for the management unit in standard Olson format. Moving to Business Unit
:param time_zone: The time_zone of this ManagementUnit.
:type: str
"""
self._time_zone = time_zone
@property
def settings(self):
"""
Gets the settings of this ManagementUnit.
The configuration settings for this management unit
:return: The settings of this ManagementUnit.
:rtype: ManagementUnitSettingsResponse
"""
return self._settings
@settings.setter
def settings(self, settings):
"""
Sets the settings of this ManagementUnit.
The configuration settings for this management unit
:param settings: The settings of this ManagementUnit.
:type: ManagementUnitSettingsResponse
"""
self._settings = settings
@property
def metadata(self):
"""
Gets the metadata of this ManagementUnit.
Version info metadata for this management unit. Deprecated, use settings.metadata
:return: The metadata of this ManagementUnit.
:rtype: WfmVersionedEntityMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this ManagementUnit.
Version info metadata for this management unit. Deprecated, use settings.metadata
:param metadata: The metadata of this ManagementUnit.
:type: WfmVersionedEntityMetadata
"""
self._metadata = metadata
@property
def version(self):
"""
Gets the version of this ManagementUnit.
The version of the underlying entity. Deprecated, use field from settings.metadata instead
:return: The version of this ManagementUnit.
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this ManagementUnit.
The version of the underlying entity. Deprecated, use field from settings.metadata instead
:param version: The version of this ManagementUnit.
:type: int
"""
self._version = version
@property
def date_modified(self):
"""
Gets the date_modified of this ManagementUnit.
The date and time at which this entity was last modified. Deprecated, use field from settings.metadata instead. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The date_modified of this ManagementUnit.
:rtype: datetime
"""
return self._date_modified
@date_modified.setter
def date_modified(self, date_modified):
"""
Sets the date_modified of this ManagementUnit.
The date and time at which this entity was last modified. Deprecated, use field from settings.metadata instead. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param date_modified: The date_modified of this ManagementUnit.
:type: datetime
"""
self._date_modified = date_modified
@property
def modified_by(self):
"""
Gets the modified_by of this ManagementUnit.
The user who last modified this entity. Deprecated, use field from settings.metadata instead
:return: The modified_by of this ManagementUnit.
:rtype: UserReference
"""
return self._modified_by
@modified_by.setter
def modified_by(self, modified_by):
"""
Sets the modified_by of this ManagementUnit.
The user who last modified this entity. Deprecated, use field from settings.metadata instead
:param modified_by: The modified_by of this ManagementUnit.
:type: UserReference
"""
self._modified_by = modified_by
@property
def self_uri(self):
"""
Gets the self_uri of this ManagementUnit.
The URI for this object
:return: The self_uri of this ManagementUnit.
:rtype: str
"""
return self._self_uri
@self_uri.setter
def self_uri(self, self_uri):
"""
Sets the self_uri of this ManagementUnit.
The URI for this object
:param self_uri: The self_uri of this ManagementUnit.
:type: str
"""
self._self_uri = self_uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 1.828125 | 2 |
harmony_tools/core/colors.py | a1fred/guitar_gammas | 1 | 7197 | COLOR_BLUE = '\033[0;34m'
COLOR_GREEN = '\033[0;32m'
COLOR_CYAN = '\033[0;36m'
COLOR_RED = '\033[0;31m'
COLOR_PURPLE = '\033[0;35m'
COLOR_BROWN = '\033[0;33m'
COLOR_YELLOW = '\033[1;33m'
COLOR_GRAY = '\033[1;30m'
COLOR_RESET = '\033[0m'
FG_COLORS = [
# COLOR_BLUE,
COLOR_GREEN,
# COLOR_CYAN,
# COLOR_RED,
# COLOR_PURPLE,
# COLOR_BROWN,
# COLOR_YELLOW,
]
def next_color(color):
assert color in FG_COLORS
index = FG_COLORS.index(color)
index += 1
try:
return FG_COLORS[index]
except IndexError:
index = 0
return FG_COLORS[index]
def c(string, color):
return f"{color}{string}{COLOR_RESET}"
| 2.53125 | 3 |
Common_Questions/TextBookQuestions/PythonCrashCourse/Chapter_8/8_5.py | tegamax/ProjectCode | 0 | 7198 | <reponame>tegamax/ProjectCode
'''
8-5. Cities: Write a function called describe_city() that accepts the name of a city and its country.
The function should print a simple sentence, such as Reykjavik is in Iceland. Give the parameter for the country a default value.
Call your function for three different cities, at least one of which is not in the default country.
'''
def describe_city(city, country='Iceland'):
    cities = ['Reykjavik', 'Kópavogur', 'Reykjanesbær', 'Garðabær', 'Mosfellsbær', 'Hafnarfjörður']
    if city in cities:
        print(f'{city} is in {country}')
    else:
        print(f'So you think {city} is a city in {country}?')

describe_city('Garðabær')
describe_city('Reykjavik')
describe_city('Cairo', 'Egypt')
'''
Árborg
Akureyri
''' | 4.5625 | 5 |
tests/test_geometry_loader.py | trnielsen/nexus-constructor | 0 | 7199 | <filename>tests/test_geometry_loader.py<gh_stars>0
from nexus_constructor.geometry import OFFGeometryNoNexus
from nexus_constructor.geometry.geometry_loader import load_geometry_from_file_object
from nexus_constructor.off_renderer import repeat_shape_over_positions
from PySide2.QtGui import QVector3D
from io import StringIO
def test_GIVEN_off_file_containing_geometry_WHEN_loading_geometry_to_file_THEN_vertices_and_faces_loaded_are_the_same_as_the_file():
model = OFFGeometryNoNexus()
model.units = "m"
off_file = (
"OFF\n"
"# cube.off\n"
"# A cube\n"
"8 6 0\n"
"-0.500000 -0.500000 0.500000\n"
"0.500000 -0.500000 0.500000\n"
"-0.500000 0.500000 0.500000\n"
"0.500000 0.500000 0.500000\n"
"-0.500000 0.500000 -0.500000\n"
"0.500000 0.500000 -0.500000\n"
"-0.500000 -0.500000 -0.500000\n"
"0.500000 -0.500000 -0.500000\n"
"4 0 1 3 2\n"
"4 2 3 5 4\n"
"4 4 5 7 6\n"
"4 6 7 1 0\n"
"4 1 7 5 3\n"
"4 6 0 2 4\n"
)
load_geometry_from_file_object(StringIO(off_file), ".off", model.units, model)
assert model.vertices == [
QVector3D(-0.5, -0.5, 0.5),
QVector3D(0.5, -0.5, 0.5),
QVector3D(-0.5, 0.5, 0.5),
QVector3D(0.5, 0.5, 0.5),
QVector3D(-0.5, 0.5, -0.5),
QVector3D(0.5, 0.5, -0.5),
QVector3D(-0.5, -0.5, -0.5),
QVector3D(0.5, -0.5, -0.5),
]
assert model.faces == [
[0, 1, 3, 2],
[2, 3, 5, 4],
[4, 5, 7, 6],
[6, 7, 1, 0],
[1, 7, 5, 3],
[6, 0, 2, 4],
]
assert model.winding_order == [
0,
1,
3,
2,
2,
3,
5,
4,
4,
5,
7,
6,
6,
7,
1,
0,
1,
7,
5,
3,
6,
0,
2,
4,
]
assert model.winding_order_indices == [0, 4, 8, 12, 16, 20]
def test_GIVEN_stl_file_with_cube_geometry_WHEN_loading_geometry_THEN_all_faces_are_present():
length = 30
left_lower_rear = QVector3D(0, 0, 0)
right_lower_rear = QVector3D(length, 0, 0)
left_upper_rear = QVector3D(0, length, 0)
right_upper_rear = QVector3D(length, length, 0)
left_lower_front = QVector3D(0, 0, length)
right_lower_front = QVector3D(length, 0, length)
left_upper_front = QVector3D(0, length, length)
right_upper_front = QVector3D(length, length, length)
# faces on a cube with a right hand winding order
faces = [
[
left_lower_front,
left_lower_rear,
right_lower_rear,
right_lower_front,
], # bottom
[left_lower_front, left_upper_front, left_upper_rear, left_lower_rear], # left
[
left_upper_front,
left_lower_front,
right_lower_front,
right_upper_front,
], # front
[
right_upper_front,
right_lower_front,
right_lower_rear,
right_upper_rear,
], # right
[right_upper_rear, right_lower_rear, left_lower_rear, left_upper_rear], # rear
[left_upper_rear, left_upper_front, right_upper_front, right_upper_rear], # top
]
cube = """solid vcg
facet normal -1.000000e+00 0.000000e+00 0.000000e+00
outer loop
vertex 0.000000e+00 3.000000e+01 0.000000e+00
vertex 0.000000e+00 0.000000e+00 3.000000e+01
vertex 0.000000e+00 3.000000e+01 3.000000e+01
endloop
endfacet
facet normal -1.000000e+00 0.000000e+00 0.000000e+00
outer loop
vertex 0.000000e+00 0.000000e+00 0.000000e+00
vertex 0.000000e+00 0.000000e+00 3.000000e+01
vertex 0.000000e+00 3.000000e+01 0.000000e+00
endloop
endfacet
facet normal 1.000000e+00 -0.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 0.000000e+00 3.000000e+01
vertex 3.000000e+01 3.000000e+01 0.000000e+00
vertex 3.000000e+01 3.000000e+01 3.000000e+01
endloop
endfacet
facet normal 1.000000e+00 0.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 0.000000e+00 3.000000e+01
vertex 3.000000e+01 0.000000e+00 0.000000e+00
vertex 3.000000e+01 3.000000e+01 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 -1.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 0.000000e+00 0.000000e+00
vertex 3.000000e+01 0.000000e+00 3.000000e+01
vertex 0.000000e+00 0.000000e+00 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 -1.000000e+00 0.000000e+00
outer loop
vertex 0.000000e+00 0.000000e+00 0.000000e+00
vertex 3.000000e+01 0.000000e+00 3.000000e+01
vertex 0.000000e+00 0.000000e+00 3.000000e+01
endloop
endfacet
facet normal 0.000000e+00 1.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 3.000000e+01 3.000000e+01
vertex 3.000000e+01 3.000000e+01 0.000000e+00
vertex 0.000000e+00 3.000000e+01 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 1.000000e+00 0.000000e+00
outer loop
vertex 3.000000e+01 3.000000e+01 3.000000e+01
vertex 0.000000e+00 3.000000e+01 0.000000e+00
vertex 0.000000e+00 3.000000e+01 3.000000e+01
endloop
endfacet
facet normal 0.000000e+00 0.000000e+00 -1.000000e+00
outer loop
vertex 0.000000e+00 3.000000e+01 0.000000e+00
vertex 3.000000e+01 3.000000e+01 0.000000e+00
vertex 0.000000e+00 0.000000e+00 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 0.000000e+00 -1.000000e+00
outer loop
vertex 0.000000e+00 0.000000e+00 0.000000e+00
vertex 3.000000e+01 3.000000e+01 0.000000e+00
vertex 3.000000e+01 0.000000e+00 0.000000e+00
endloop
endfacet
facet normal 0.000000e+00 0.000000e+00 1.000000e+00
outer loop
vertex 3.000000e+01 3.000000e+01 3.000000e+01
vertex 0.000000e+00 3.000000e+01 3.000000e+01
vertex 0.000000e+00 0.000000e+00 3.000000e+01
endloop
endfacet
facet normal 0.000000e+00 0.000000e+00 1.000000e+00
outer loop
vertex 3.000000e+01 3.000000e+01 3.000000e+01
vertex 0.000000e+00 0.000000e+00 3.000000e+01
vertex 3.000000e+01 0.000000e+00 3.000000e+01
endloop
endfacet
endsolid vcg"""
geometry = load_geometry_from_file_object(StringIO(cube), ".stl", "m")
# 2 triangles per face, 6 faces in the cube
assert len(geometry.faces) == 6 * 2
assert geometry.winding_order_indices == [i * 3 for i in range(12)]
# each expected vertex is in the shape
for vertex in [
left_lower_rear,
right_lower_rear,
left_upper_rear,
right_upper_rear,
left_lower_front,
right_lower_front,
left_upper_front,
right_upper_front,
]:
assert vertex in geometry.vertices
# each face must be in the loaded geometry
for face in faces:
face_found = False
# each face could be split into triangles in one of two ways
for triangle_split in [
[[face[0], face[1], face[2]], [face[2], face[3], face[0]]],
[[face[1], face[2], face[3]], [face[3], face[0], face[1]]],
]:
triangle_matches = 0
            # each triangle in the square's split must be in the loaded geometry for the square to be counted as found
for triangle in triangle_split:
# check the triangle against each rotation of each triangle in the geometry
for candidate_triangle_indices in geometry.faces:
a = geometry.vertices[candidate_triangle_indices[0]]
b = geometry.vertices[candidate_triangle_indices[1]]
c = geometry.vertices[candidate_triangle_indices[2]]
if (
triangle == [a, b, c]
or triangle == [b, c, a]
or triangle == [c, a, b]
):
triangle_matches += 1
if triangle_matches == 2:
face_found = True
assert face_found
def test_GIVEN_unrecognised_file_extension_WHEN_loading_geometry_THEN_returns_empty_geometry():
geometry = load_geometry_from_file_object(StringIO(), ".txt", "m")
assert len(geometry.vertices) == 0
assert len(geometry.faces) == 0
def get_dummy_OFF():
# A square with a triangle on the side
original_vertices = [
QVector3D(0, 0, 0),
QVector3D(0, 1, 0),
QVector3D(1, 1, 0),
QVector3D(1, 0, 0),
QVector3D(1.5, 0.5, 0),
]
original_faces = [[0, 1, 2, 3], [2, 3, 4]]
return OFFGeometryNoNexus(vertices=original_vertices, faces=original_faces)
def test_WHEN_generate_off_mesh_with_no_repeat_THEN_off_unchanged():
off_geometry = get_dummy_OFF()
positions = [QVector3D(0, 0, 0)]
faces, vertices = repeat_shape_over_positions(off_geometry, positions)
assert faces == off_geometry.faces
assert vertices == off_geometry.vertices
def test_WHEN_generate_off_mesh_with_three_copies_THEN_original_shape_remains():
off_geometry = get_dummy_OFF()
positions = [QVector3D(0, 0, 0), QVector3D(0, 0, 1), QVector3D(1, 0, 0)]
faces, vertices = repeat_shape_over_positions(off_geometry, positions)
assert faces[: len(off_geometry.faces)] == off_geometry.faces
assert vertices[: len(off_geometry.vertices)] == off_geometry.vertices
def _test_position_with_single_translation_helper(translation):
off_geometry = get_dummy_OFF()
positions = [QVector3D(0, 0, 0), translation]
faces, vertices = repeat_shape_over_positions(off_geometry, positions)
second_shape_faces = faces[len(off_geometry.faces) :]
second_shape_vertices = vertices[len(off_geometry.vertices) :]
    # Faces will be the same but with every vertex index offset by len(off_geometry.vertices)
shifted_faces = []
for face in second_shape_faces:
shifted_face = []
for vertex in face:
shifted_face.append(vertex - len(off_geometry.vertices))
shifted_faces.append(shifted_face)
assert shifted_faces == off_geometry.faces
return off_geometry.vertices, second_shape_vertices
def test_WHEN_generate_off_mesh_with_single_x_position_THEN_second_shape_just_translation_of_first():
(
original_vertices,
second_shape_vertices,
) = _test_position_with_single_translation_helper(QVector3D(1, 0, 0))
    # Vertices will be the same but shifted by 1
for vertex in second_shape_vertices:
vertex.setX(vertex.x() - 1)
assert second_shape_vertices == original_vertices
def test_WHEN_generate_off_mesh_with_single_y_position_THEN_second_shape_just_translation_of_first():
(
original_vertices,
second_shape_vertices,
) = _test_position_with_single_translation_helper(QVector3D(0, 1, 0))
    # Vertices will be the same but shifted by 1
for vertex in second_shape_vertices:
vertex.setY(vertex.y() - 1)
assert second_shape_vertices == original_vertices
def test_WHEN_generate_off_mesh_with_single_negative_z_position_THEN_second_shape_just_translation_of_first():
(
original_vertices,
second_shape_vertices,
) = _test_position_with_single_translation_helper(QVector3D(0, 0, -1))
    # Vertices will be the same but shifted by 1
for vertex in second_shape_vertices:
vertex.setZ(vertex.z() + 1)
assert second_shape_vertices == original_vertices
def test_WHEN_generate_off_mesh_with_single_diagonal_position_THEN_second_shape_just_translation_of_first():
(
original_vertices,
second_shape_vertices,
) = _test_position_with_single_translation_helper(QVector3D(0, 1, -1))
for vertex in second_shape_vertices:
vertex.setZ(vertex.z() + 1)
vertex.setY(vertex.y() - 1)
assert second_shape_vertices == original_vertices
| 2.265625 | 2 |