Dataset columns (all string-valued; lengths as observed across the corpus):

    repo_name           length 7 to 94
    repo_path           length 4 to 237
    repo_head_hexsha    length 40 to 40
    content             length 10 to 680k
    apis                length 2 to 840k
sauli6692/barbershop
employees/choices.py
862357bd78235e720b2e3b868d2423a57bb4e328
from django.utils.translation import ugettext_lazy as _

USER_TYPE_STAFF = 'STAFF'
USER_TYPE_ADMIN = 'ADMIN'
USER_TYPE_BARBER = 'BARBER'

USER_TYPE_CHOICES = (
    (USER_TYPE_STAFF, _('Dev')),
    (USER_TYPE_ADMIN, _('Admin')),
    (USER_TYPE_BARBER, _('Barber')),
)
[((8, 22, 8, 30), 'django.utils.translation.ugettext_lazy', '_', ({(8, 24, 8, 29): '"""Dev"""'}, {}), "('Dev')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9, 22, 9, 32), 'django.utils.translation.ugettext_lazy', '_', ({(9, 24, 9, 31): '"""Admin"""'}, {}), "('Admin')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10, 23, 10, 34), 'django.utils.translation.ugettext_lazy', '_', ({(10, 25, 10, 33): '"""Barber"""'}, {}), "('Barber')", True, 'from django.utils.translation import ugettext_lazy as _\n')]
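Each `apis` cell above and below is a stringified Python list with one tuple per extracted call site; from the samples, the fields appear to be the source span, the fully qualified API name, the local name used at the call, the extracted positional and keyword arguments, the call text, an aliased-import flag, and the originating import statement. A minimal sketch for decoding such a cell, assuming it is always a well-formed Python literal (the field order is inferred from these samples, not from a published schema):

import ast

def decode_apis(apis_field: str):
    """Parse one `apis` cell into a list of call-site records (inferred layout)."""
    records = []
    for entry in ast.literal_eval(apis_field):
        span, api, alias, args, call_text, aliased, import_stmt = entry
        records.append({
            'span': span,                   # (line_start, col_start, line_end, col_end)
            'api': api,                     # e.g. 'django.utils.translation.ugettext_lazy'
            'alias': alias,                 # local name at the call site, e.g. '_'
            'args': args,                   # extracted (positional, keyword) argument maps, when present
            'call': call_text,              # reconstructed call text, e.g. "('Dev')"
            'aliased_import': aliased,      # apparently True when the name was imported under an alias
            'import': import_stmt.strip(),  # the import line that binds the name
        })
    return records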
google-ar/chromium
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py
2441c86a5fd975f09a6c30cddb57dfb7fc239699
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from common.chrome_proxy_shared_page_state import ChromeProxySharedPageState
from telemetry.page import page as page_module
from telemetry import story


class HTML5TestPage(page_module.Page):

  def __init__(self, url, page_set):
    super(HTML5TestPage, self).__init__(url=url, page_set=page_set,
        shared_page_state_class=ChromeProxySharedPageState)


class HTML5TestStorySet(story.StorySet):
  """ Chrome proxy test page for traffic over https. """

  def __init__(self):
    super(HTML5TestStorySet, self).__init__()

    urls_list = [
      'http://html5test.com/',
    ]

    for url in urls_list:
      self.AddStory(HTML5TestPage(url, self))
[]
eliranM98/python_course
lessons/sqlite_example/database.py
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
""" in this example we want to create a user credentials database with: user_id & password logger showing connection logs, DB version, errors during fetching & executing """ import sqlite3 from lessons.sqlite_example.log import create as create_logger class Commands: create_users_table = ''' CREATE TABLE IF NOT EXISTS users ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_id text, password text ); ''' add_user = 'INSERT INTO users (user_id, password) VALUES (\'{}\', \'{}\');' get_users = 'SELECT user_id, password FROM users;' get_user_by_user_id = 'SELECT user_id, password FROM users WHERE user_id = \'{}\';' get_user_by_id = 'SELECT user_id, password FROM users WHERE id = \'{}\';''' get_last_user = 'SELECT user_id, password FROM users ORDER BY ID DESC LIMIT 1' drop_table = 'DROP TABLE IF EXISTS {};' class DataBase: """ create a database connection to the SQLite database specified by db_file :param db_file: database file """ def __init__(self, db_file, log, commands=None): """ database connection """ try: self.log = log self.log.info('connecting to database') self.connection = sqlite3.connect(db_file) self.cursor = self.connection.cursor() self.log.info('connection success') self.log.info('sqlite3 version {}'.format(sqlite3.version)) if commands is None: commands = Commands self.command = commands except Exception as e: self.log.exception(e) raise Exception(e) def execute(self, command, *args, **kwargs): try: return self.cursor.execute(command) except Exception as e: self.log.exception(e) def fetch(self, command=None, *args, **kw): if command is not None: self.execute(command) try: return self.cursor.fetchall() except Exception as e: self.log.exception(e) def export_from_table_to_file(self, table, file_name, titles, permission='w'): try: self.cursor.execute("select * from {}".format(table)) table_list = self.cursor.fetchall() with open(file_name, permission) as f: f.write(','.join(titles) + '\n') for i in table_list: s = [] for a in i: s.append(str(a)) f.write(','.join(s) + '\n') except Exception as e: self.log.exception(e) def fetch_log(self, *args, **kw): rows = self.fetch(*args, **kw) if rows is not None: for r in rows: self.log.info(r) return rows class DataBaseExtention(DataBase): # def get_user_credentials(self, user=None, id=None): # users = self.fetch(self.command.get_users) # if user is not None: # for i in users: # if user in i: # return i # if id is not None: # return users[id][1:] # return users[-1][1:] def get_user_credentials(self, user=None, id=None): if user is not None: user_credentials = self.fetch(self.command.get_user_by_user_id.format(user)) elif id is not None: user_credentials = self.fetch(self.command.get_user_by_id.format(id)) else: user_credentials = self.fetch(self.command.get_last_user) if len(user_credentials) > 0: return user_credentials[0] if "__main__" == __name__: import os log_file = os.path.dirname(os.path.abspath(__file__)) + '\\log.txt' db_file = os.path.dirname(os.path.abspath(__file__)) + '\\db.db' log = create_logger(log_file=log_file) database = DataBaseExtention(db_file, log) # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ # database.execute(database.command.drop_table.format('users')) # database.execute(database.command.create_users_table) # database.execute(database.command.add_user.format('cs0008', '123123a')) # database.execute(database.command.add_user.format('af0006', '123123a')) # database.execute(database.command.add_user.format('jh0003', '123123a')) # database.execute(database.command.add_user.format('kb0004', '123123a')) # 
database.execute(database.command.add_user.format('op0001', '123123a')) # database.execute(database.command.add_user.format('gv0001', '123123a')) # database.execute(database.command.add_user.format('pm0001', '123123a')) # database.execute(database.command.add_user.format('ps0001', '123123a')) # database.execute(database.command.add_user.format('qa0000', '123123a')) # user_credentials = database.get_user_credentials(id='14') # database.connection.commit() # database.connection.close() # print(user_credentials) # create a simple database with websites table that includes ( # url: varchar(1024), # popularity_score: integer, # monthly_visitations: integer # ) # database.command.create_websites_table = ''' # CREATE TABLE IF NOT EXISTS websites ( # id INTEGER PRIMARY KEY AUTOINCREMENT, # url TEXT, # popularity_score INTEGER, # monthly_visitations INTEGER # ) # ''' # database.command.add_website = 'INSERT INTO websites (url, popularity_score, monthly_visitations) VALUES (\'{}\', \'{}\', \'{}\');' # database.execute(database.command.create_websites_table) # database.execute(database.command.add_website.format('https://www.google.com', 5, 4000000000)) # database.execute(database.command.add_website.format('https://www.ynet.com', 3, 5000000)) # database.execute(database.command.add_website.format('https://www.youtube.com', 6, 1300000000)) # database.execute(database.command.add_website.format('https://www.python.org', 5, 1000000)) # database.command.get_site = 'SELECT url, popularity_score, monthly_visitations FROM websites WHERE url = \'{}\';' # url, popularity, visitations = database.fetch(database.command.get_site.format('https://www.python.org'))[0] # # print(url, popularity, visitations) database.export_from_table_to_file( table='websites', file_name='exported.csv', titles=('id', 'url', 'popularity_score', 'monthly_visitations') ) # database.connection.commit() database.connection.close()
[((111, 10, 111, 42), 'lessons.sqlite_example.log.create', 'create_logger', (), '', True, 'from lessons.sqlite_example.log import create as create_logger\n'), ((38, 30, 38, 54), 'sqlite3.connect', 'sqlite3.connect', ({(38, 46, 38, 53): 'db_file'}, {}), '(db_file)', False, 'import sqlite3\n'), ((109, 31, 109, 56), 'os.path.abspath', 'os.path.abspath', ({(109, 47, 109, 55): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((110, 30, 110, 55), 'os.path.abspath', 'os.path.abspath', ({(110, 46, 110, 54): '__file__'}, {}), '(__file__)', False, 'import os\n')]
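The `Commands` strings in the record above splice values into SQL with `str.format`, so any quote in the input breaks the statement and invites injection. A hedged sketch of the same two operations using sqlite3 placeholders, written against a `users` table of the shape created by `create_users_table`:

import sqlite3

def add_user(connection: sqlite3.Connection, user_id: str, password: str) -> None:
    # Placeholders let the driver bind the values; no quoting or escaping by hand.
    connection.execute(
        'INSERT INTO users (user_id, password) VALUES (?, ?);',
        (user_id, password),
    )
    connection.commit()

def get_user_by_user_id(connection: sqlite3.Connection, user_id: str):
    cursor = connection.execute(
        'SELECT user_id, password FROM users WHERE user_id = ?;',
        (user_id,),
    )
    return cursor.fetchone()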
emmawoollett/projectx
backend/app/projectx/routing.py
c061df01d581456884f46c2b8e3b478626501dec
from django.urls import re_path

from projectx.consumers import UserWebSocketConsumer
from .consumers import UserWebSocketConsumer

websocket_urlpatterns = [
    re_path(r"^ws/$", UserWebSocketConsumer.as_asgi()),
]
[((8, 22, 8, 53), 'projectx.consumers.UserWebSocketConsumer.as_asgi', 'UserWebSocketConsumer.as_asgi', ({}, {}), '()', False, 'from projectx.consumers import UserWebSocketConsumer\n')]
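The `websocket_urlpatterns` list above only takes effect once it is mounted in an ASGI application, which is not part of this record. A rough sketch of the usual Django Channels wiring, with a hypothetical settings module path:

import os

from channels.routing import ProtocolTypeRouter, URLRouter
from django.core.asgi import get_asgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")  # hypothetical; the real path lives in the projectx repo

django_asgi_app = get_asgi_application()

from projectx.routing import websocket_urlpatterns  # the module shown above

application = ProtocolTypeRouter({
    "http": django_asgi_app,
    "websocket": URLRouter(websocket_urlpatterns),
})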
lab360-ch/aldryn-search
aldryn_search/cms_apps.py
15a319edac126aa1e44f22d34a7bcb5aec3e3dde
from django.utils.translation import ugettext_lazy as _

from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool

from .conf import settings


class AldrynSearchApphook(CMSApp):
    name = _("aldryn search")

    def get_urls(self, *args, **kwargs):
        return ['aldryn_search.urls']


if settings.ALDRYN_SEARCH_REGISTER_APPHOOK:
    apphook_pool.register(AldrynSearchApphook)
[((10, 11, 10, 29), 'django.utils.translation.ugettext_lazy', '_', ({(10, 13, 10, 28): '"""aldryn search"""'}, {}), "('aldryn search')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((17, 4, 17, 46), 'cms.apphook_pool.apphook_pool.register', 'apphook_pool.register', ({(17, 26, 17, 45): 'AldrynSearchApphook'}, {}), '(AldrynSearchApphook)', False, 'from cms.apphook_pool import apphook_pool\n')]
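This record, like the first one, imports `ugettext_lazy`, which Django deprecated in 3.0 and removed in 4.0. On current Django the same lazy-translation alias is spelled `gettext_lazy`; a minimal sketch (variable name illustrative only):

from django.utils.translation import gettext_lazy as _

# Same deferred-translation behavior as the ugettext_lazy alias used above.
APPHOOK_NAME = _("aldryn search")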
t2y/python-study
BizPy/openpyxl/20200513/horizontal_chart.py
52a132ea600d4696164e540d8a8f8f5fc58e097a
import pandas as pd
from openpyxl import Workbook
from openpyxl.chart import BarChart, Reference

wb = Workbook()
ws = wb.active

df = pd.read_csv('population.csv')
ws.append(df.columns.tolist())
for row in df.values:
    ws.append(list(row))

row_length = 1 + len(df.values)
values = Reference(ws, min_col=2, max_col=2, min_row=1, max_row=row_length)
categories = Reference(ws, min_col=1, min_row=2, max_row=row_length)

chart = BarChart()
chart.type = 'bar'
chart.style = 11
chart.shape = 4
chart.title = '都道府県別の人口'
chart.x_axis.title = '都道府県'
chart.y_axis.title = '人口'

chart.add_data(values, titles_from_data=True)
chart.set_categories(categories)
ws.add_chart(chart, 'A9')
wb.save('population_horizontal.xlsx')
[((5, 5, 5, 15), 'openpyxl.Workbook', 'Workbook', ({}, {}), '()', False, 'from openpyxl import Workbook\n'), ((8, 5, 8, 34), 'pandas.read_csv', 'pd.read_csv', ({(8, 17, 8, 33): '"""population.csv"""'}, {}), "('population.csv')", True, 'import pandas as pd\n'), ((14, 9, 14, 75), 'openpyxl.chart.Reference', 'Reference', (), '', False, 'from openpyxl.chart import BarChart, Reference\n'), ((15, 13, 15, 68), 'openpyxl.chart.Reference', 'Reference', (), '', False, 'from openpyxl.chart import BarChart, Reference\n'), ((17, 8, 17, 18), 'openpyxl.chart.BarChart', 'BarChart', ({}, {}), '()', False, 'from openpyxl.chart import BarChart, Reference\n')]
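The script above reads a `population.csv` that is not included in this record; judging from the `Reference` ranges (column 1 feeds the categories, column 2 the values) and the Japanese chart titles (roughly "population by prefecture" and "population"), it expects a header row followed by prefecture names and population counts. A hypothetical fixture for exercising the script, with placeholder figures only:

import csv

# Placeholder rows; real prefecture populations would come from the original
# data source, which is not part of this record. Only the column order matters.
rows = [
    ("prefecture", "population"),
    ("Tokyo", 14000000),
    ("Osaka", 8800000),
    ("Hokkaido", 5200000),
]

with open("population.csv", "w", newline="", encoding="utf-8") as f:
    csv.writer(f).writerows(rows)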
alex/changes
changes/api/serializer/models/logsource.py
69a17b4c639e7082a75d037384ccb68ead3a0b4b
from changes.api.serializer import Serializer, register
from changes.models.log import LogSource


@register(LogSource)
class LogSourceSerializer(Serializer):
    def serialize(self, instance, attrs):
        return {
            'id': instance.id.hex,
            'job': {
                'id': instance.job_id.hex,
            },
            'name': instance.name,
            'step': instance.step,
            'dateCreated': instance.date_created,
        }
[((5, 1, 5, 20), 'changes.api.serializer.register', 'register', ({(5, 10, 5, 19): 'LogSource'}, {}), '(LogSource)', False, 'from changes.api.serializer import Serializer, register\n')]
IsaacYangSLA/NVFlare
examples/prostate/data_preparation/utils/nrrd_to_nifti.py
8c6582894c9a8431f64479bc9f472fefcd71e5a7
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

import nibabel as nib
import nrrd
import numpy as np

parser = argparse.ArgumentParser("Convert nrrd label to nifti with reference image file for affine")
parser.add_argument("--input_path", help="Input nrrd path", type=str)
parser.add_argument("--reference_path", help="Reference image path", type=str)
parser.add_argument("--output_path", help="Output nifti path", type=str)
args = parser.parse_args()

img = nib.load(args.reference_path)
img_affine = img.affine

nrrd = nrrd.read(args.input_path)
data = np.flip(nrrd[0], axis=1)

nft_img = nib.Nifti1Image(data, img_affine)
nib.save(nft_img, args.output_path)
[((21, 9, 21, 100), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({(21, 33, 21, 99): '"""Convert nrrd label to nifti with reference image file for affine"""'}, {}), "(\n 'Convert nrrd label to nifti with reference image file for affine')", False, 'import argparse\n'), ((27, 6, 27, 35), 'nibabel.load', 'nib.load', ({(27, 15, 27, 34): 'args.reference_path'}, {}), '(args.reference_path)', True, 'import nibabel as nib\n'), ((31, 7, 31, 31), 'numpy.flip', 'np.flip', (), '', True, 'import numpy as np\n'), ((33, 10, 33, 43), 'nibabel.Nifti1Image', 'nib.Nifti1Image', ({(33, 26, 33, 30): 'data', (33, 32, 33, 42): 'img_affine'}, {}), '(data, img_affine)', True, 'import nibabel as nib\n'), ((34, 0, 34, 35), 'nibabel.save', 'nib.save', ({(34, 9, 34, 16): 'nft_img', (34, 18, 34, 34): 'args.output_path'}, {}), '(nft_img, args.output_path)', True, 'import nibabel as nib\n')]
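One wrinkle in the script above is that `nrrd = nrrd.read(args.input_path)` rebinds the `nrrd` module name to the returned value. pynrrd's `read` returns a `(data, header)` pair, so an equivalent sketch that keeps the module name intact, under the assumption that only the data array is needed, could look like this:

import nibabel as nib
import nrrd
import numpy as np

def convert(input_path: str, reference_path: str, output_path: str) -> None:
    img_affine = nib.load(reference_path).affine   # affine taken from the reference image
    label_data, _header = nrrd.read(input_path)     # (data, header) tuple from pynrrd
    data = np.flip(label_data, axis=1)              # same axis-1 flip as the original script
    nib.save(nib.Nifti1Image(data, img_affine), output_path)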
jszakmeister/rst2ctags
setup.py
22f4035d9ea1e43a07b91f806014d318b3dc5097
from setuptools import setup
import io
import os
import re


version_re = re.compile(r'^__version__ = "([^"]*)"$')

# Find the version number.
with open('rst2ctags.py', 'r') as f:
    for line in f:
        line = line.rstrip()
        m = version_re.match(line)
        if m:
            version = m.group(1)
            break
    else:
        raise RuntimeError("Couldn't find version string in rst2ctags.py")


# Load the description.
readme_path = os.path.join(os.path.dirname(__file__), 'README.rst')
with io.open(readme_path, encoding='utf-8') as f:
    long_description = f.read()


setup(
    name='rst2ctags',
    description='Generates ctags-compatible output for the sections of a '
                'reStructuredText document.',
    long_description=long_description,
    license='BSD',
    author='John Szakmeister',
    author_email='[email protected]',
    url='https://github.com/jszakmeister/rst2ctags',
    version=version,
    py_modules=['rst2ctags'],
    zip_safe=True,
    entry_points={
        'console_scripts': [
            'rst2ctags = rst2ctags:cli_main',
        ],
    },
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: Indexing',
        'Topic :: Utilities',
    ]
)
[((8, 13, 8, 53), 're.compile', 're.compile', ({(8, 24, 8, 52): '"""^__version__ = "([^"]*)"$"""'}, {}), '(\'^__version__ = "([^"]*)"$\')', False, 'import re\n'), ((29, 0, 58, 1), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n'), ((24, 27, 24, 52), 'os.path.dirname', 'os.path.dirname', ({(24, 43, 24, 51): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((25, 5, 25, 43), 'io.open', 'io.open', (), '', False, 'import io\n')]
rerobots/hardshare
py-ws/hardshare/cli.py
456e7d1d1eb21d03efc3cd1f7960a1729b62527b
#!/usr/bin/env python # Copyright (C) 2018 rerobots, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Command-line interface """ import argparse import json import logging import logging.handlers import os import os.path import subprocess import sys import uuid import yaml from aiohttp.client_exceptions import ClientConnectorError as ConnectionError from .core import WorkspaceInstance from .mgmt import get_local_config, add_key, add_ssh_path, list_local_keys from .mgmt import find_wd, modify_local, rm_wd from .api import HSAPIClient from .err import Error as HSError from .addons import camera_main, stop_cameras from .addons import add_cmdsh, rm_cmdsh, add_vnc, rm_vnc, add_mistyproxy, rm_mistyproxy def get_config_with_index(id_prefix=None): try: config = get_local_config() except: print('error loading configuration data. does it exist?') return None, None, 1 if len(config['wdeployments']) == 0: print(('ERROR: no workspace deployment in local configuration.')) return config, None, 1 if isinstance(id_prefix, list): if len(id_prefix) == 0: if len(config['wdeployments']) > 1: print('ERROR: ambiguous command: more than 1 workspace deployment defined.') return config, None, 1 index = [0] else: indices = [] for idp in id_prefix: index = find_wd(config, idp) if index is None: print('ERROR: given prefix does not match precisely 1 workspace deployment') return config, None, 1 indices.append(index) index = indices elif id_prefix: index = find_wd(config, id_prefix) if index is None: print('ERROR: given prefix does not match precisely 1 workspace deployment') return config, None, 1 else: if len(config['wdeployments']) > 1: print('ERROR: ambiguous command: more than 1 workspace deployment defined.') return config, None, 1 index = 0 return config, index, 0 def main(argv=None): pkglogger = logging.getLogger('hardshare') pkglogger.setLevel(logging.WARNING) loghandler = logging.handlers.WatchedFileHandler(filename='hardshare_client.log', mode='a', delay=True) loghandler.setLevel(logging.DEBUG) loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});' ' %(asctime)s ; %(message)s' .format(os.getpid()))) pkglogger.addHandler(loghandler) if argv is None: argv = sys.argv[1:] argparser = argparse.ArgumentParser(description=('Command-line interface' ' for the hardshare client'), add_help=False) argparser.add_argument('-h', '--help', dest='print_help', action='store_true', default=False, help='print this help message and exit') argparser.add_argument('-V', '--version', action='store_true', default=False, help='print version of hardshare (this) package.', dest='print_version') argparser.add_argument('-v', '--verbose', action='store_true', default=False, help='print verbose messages about actions by the hardshare client', dest='verbose') argparser.add_argument('--format', metavar='FORMAT', default=None, type=str, help=('special output formatting (default is no special formatting); ' 'options: YAML , JSON'), dest='output_format') subparsers = argparser.add_subparsers(dest='command') 
subparsers.add_parser('version', help='print version number and exit.') help_parser = subparsers.add_parser('help', help='print this help message and exit') help_parser.add_argument('help_target_command', metavar='COMMAND', type=str, nargs='?') config_commanddesc = 'manage local and remote configuration' config_parser = subparsers.add_parser('config', description=config_commanddesc, help=config_commanddesc) config_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of workspace deployment for configuration changes' ' (can be unique prefix); ' 'this argument is not required ' 'if there is only 1 workspace deployment')) config_parser.add_argument('-c', '--create', action='store_true', default=False, dest='create_config', help='if no local configuration is found, then create one') config_parser.add_argument('--add-terminate-prog', metavar='PATH', dest='add_terminate_prog', default=None, help='add program to list of commands to execute') config_parser.add_argument('--rm-terminate-prog', metavar='PATH', dest='rm_terminate_prog', default=None, help=('remove program from list of commands to execute; ' 'for example, ' 'copy-and-paste value shown in `hardshare config -l` here')) config_parser.add_argument('--add-key', metavar='FILE', dest='new_api_token', help='add new account key') config_parser.add_argument('--add-ssh-path', metavar='PATH', dest='new_ssh_path', help='add path to SSH key pair (does NOT copy the key)') config_parser.add_argument('--add-raw-device', metavar='PATH', type=str, dest='raw_device_path', default=None, help='add device file to present in container') config_parser.add_argument('--cprovider', metavar='CPROVIDER', type=str, dest='cprovider', default=None, help='select a container provider: docker, podman, proxy') config_parser.add_argument('--assign-image', metavar='IMG', type=str, dest='cprovider_img', default=None, help='assign image for cprovider to use (advanced option)') config_parser.add_argument('--rm-raw-device', metavar='PATH', type=str, dest='remove_raw_device_path', default=None, help='remove device previously marked for inclusion in container') config_parser.add_argument('--add-init-inside', metavar='CMD', type=str, dest='add_init_inside', default=None, help='add command to be executed inside container') config_parser.add_argument('--rm-init-inside', action='store_true', default=False, dest='rm_init_inside', help='remove (empty) list of commands for inside initialization') config_parser.add_argument('-p', '--prune', action='store_true', default=False, dest='prune_err_keys', help=('delete files in local key directory that' ' are not valid; to get list of' ' files with errors, try `--list`')) config_parser.add_argument('-l', '--list', action='store_true', default=False, dest='list_config', help='list configuration') config_parser.add_argument('--local', action='store_true', default=False, dest='only_local_config', help='only show local configuration data') config_parser.add_argument('--include-dissolved', action='store_true', default=False, dest='include_dissolved', help='include configuration data of dissolved workspace deployments') config_parser.add_argument('--declare', metavar='ID', dest='declared_wdeployment_id', default=None, help=('declare that workspace deployment is' ' hosted here. 
(this only works if it' ' has been previously registered under' ' the same user account.)')) rules_commanddesc = 'modify access rules (also known as capabilities or permissions)' rules_parser = subparsers.add_parser('rules', description=rules_commanddesc, help=rules_commanddesc) rules_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of target workspace deployment' ' (can be unique prefix); ' 'this argument is not required ' 'if there is only 1 workspace deployment')) rules_parser.add_argument('-l', '--list', action='store_true', default=False, dest='list_rules', help='list all rules') rules_parser.add_argument('--permit-me', action='store_true', default=False, dest='add_rule_permit_me', help='permit instantiations by you (the owner)') rules_parser.add_argument('--drop-all', action='store_true', default=False, dest='drop_all_rules', help=('remove all access rules; ' 'note that access is denied by default, ' 'including to you (the owner)')) rules_parser.add_argument('--permit-all', action='store_true', default=False, dest='add_rule_permit_all', help='permit instantiations by anyone') register_commanddesc = 'register new workspace deployment' register_parser = subparsers.add_parser('register', description=register_commanddesc, help=register_commanddesc) register_parser.add_argument('--permit-more', action='store_false', default=True, dest='register_at_most_one', help=('permit registration of more than 1 wdeployment; ' 'default is to fail if local configuration already ' 'has wdeployment declared')) check_commanddesc = 'check registration of this workspace deployment' check_parser = subparsers.add_parser('check', description=check_commanddesc, help=check_commanddesc) check_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of workspace deployment to check' ' (can be unique prefix)')) dissolve_commanddesc = ('dissolve this workspace deployment, making it' ' unavailable for any future use' ' (THIS CANNOT BE UNDONE)') dissolve_parser = subparsers.add_parser('dissolve', description=dissolve_commanddesc, help=dissolve_commanddesc) dissolve_parser.add_argument('wdid', metavar='ID', nargs='?', default=None, help='id of workspace deployment to dissolve') status_commanddesc = 'get status of local instances and daemon' status_parser = subparsers.add_parser('status', description=status_commanddesc, help=status_commanddesc) status_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of target workspace deployment' ' (can be unique prefix)')) advertise_commanddesc = 'advertise availability, accept new instances' advertise_parser = subparsers.add_parser('ad', description=advertise_commanddesc, help=advertise_commanddesc) advertise_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of workspace deployment to advertise' ' (can be unique prefix); ' 'this argument is not required ' 'if there is only 1 workspace deployment')) advertise_parser.add_argument('-d', '--daemon', action='store_true', default=False, help='detach from invoking terminal (i.e., run as daemon)', dest='become_daemon') attach_camera_commanddesc = 'attach camera stream to workspace deployments' attach_camera_parser = subparsers.add_parser('attach-camera', description=attach_camera_commanddesc, help=attach_camera_commanddesc) attach_camera_parser.add_argument('camera', default=0, type=int, help=('on Linux, 0 typically implies /dev/video0; ' 'if you only have one camera, then try 0')) 
attach_camera_parser.add_argument('id_prefix', metavar='ID', nargs='*', default=None, help=('id of workspace deployment on which to attach' ' (can be unique prefix); ' 'this argument is not required ' 'if there is only 1 workspace deployment')) attach_camera_parser.add_argument('--width-height', metavar='W,H', type=str, dest='attach_camera_res', default=None, help=('width and height of captured images; ' 'default depends on the supporting drivers')) attach_camera_parser.add_argument('--crop', metavar='CROPCONFIG', type=str, dest='attach_camera_crop_config', default=None, help=('image crop configuration; ' 'default: all wdeployments get full images')) attach_camera_parser.add_argument('-d', '--daemon', action='store_true', default=False, help='detach from invoking terminal (i.e., run as daemon)', dest='become_daemon') stop_cameras_commanddesc = 'stop camera streams previously started by attach-camera' stop_cameras_parser = subparsers.add_parser('stop-cameras', description=stop_cameras_commanddesc, help=stop_cameras_commanddesc) stop_cameras_parser.add_argument('-a', '--all', action='store_true', default=False, help=('stop all attached cameras associated with this ' 'user account, whether or not started on this host'), dest='all_cameras') addon_cmdsh_commanddesc = 'manage add-on cmdsh for your workspace deployments' addon_cmdsh_parser = subparsers.add_parser('addon-cmdsh', description=addon_cmdsh_commanddesc, help=addon_cmdsh_commanddesc) addon_cmdsh_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of workspace deployment' ' (can be unique prefix); ' 'this argument is not required ' 'if there is only 1 workspace deployment')) addon_cmdsh_parser.add_argument('--add', action='store_true', default=False, help='add add-on cmdsh to enable terminal access via WebSockets', dest='add_addon_cmdsh') addon_cmdsh_parser.add_argument('--rm', action='store_true', default=False, help='remove add-on cmdsh', dest='rm_addon_cmdsh') addon_vnc_commanddesc = 'manage add-on vnc for your workspace deployments' addon_vnc_parser = subparsers.add_parser('addon-vnc', description=addon_vnc_commanddesc, help=addon_vnc_commanddesc) addon_vnc_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of workspace deployment' ' (can be unique prefix); ' 'this argument is not required ' 'if there is only 1 workspace deployment')) addon_vnc_parser.add_argument('--add', action='store_true', default=False, help='add add-on vnc to enable VNC via rerobots.net', dest='add_addon_vnc') addon_vnc_parser.add_argument('--rm', action='store_true', default=False, help='remove add-on vnc', dest='rm_addon_vnc') addon_mistyproxy_commanddesc = 'manage add-on mistyproxy for your workspace deployments' addon_mistyproxy_parser = subparsers.add_parser('addon-mistyproxy', description=addon_mistyproxy_commanddesc, help=addon_mistyproxy_commanddesc) addon_mistyproxy_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of workspace deployment' ' (can be unique prefix); ' 'this argument is not required ' 'if there is only 1 workspace deployment')) addon_mistyproxy_parser.add_argument('--add', action='store_true', default=False, help='add add-on mistyproxy to allow HTTP proxy to Misty robots', dest='add_addon_mistyproxy') addon_mistyproxy_parser.add_argument('--ip', metavar='ADDRESS', default=None, help='IP address of the Misty robot', dest='targetaddr') addon_mistyproxy_parser.add_argument('--rm', action='store_true', default=False, help='remove add-on mistyproxy', 
dest='rm_addon_mistyproxy') terminate_commanddesc = 'mark as unavailable; optionally wait for current instance to finish' terminate_parser = subparsers.add_parser('stop-ad', description=terminate_commanddesc, help=terminate_commanddesc) terminate_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None, help=('id of target workspace deployment' ' (can be unique prefix)')) terminate_parser.add_argument('-f', '--force', action='store_true', default=False, help=('if there is an active instance, then' ' stop it without waiting'), dest='force_terminate') help_message_purge = ('if the server indicates that an instance is active,' ' but there is not one or it is otherwise in a' ' non-recoverable state, then mark it remotely as' ' terminated and attempt local clean-up; this' ' command is a last resort. First, try `hardshare' ' terminate` without --purge.') terminate_parser.add_argument('--purge', action='store_true', default=False, help=help_message_purge, dest='purge_supposed_instance') argv_parsed = argparser.parse_args(argv) if argv_parsed.print_version or argv_parsed.command == 'version': from . import __version__ as hardshare_pkg_version print(hardshare_pkg_version) return 0 elif argv_parsed.command is None or argv_parsed.command == 'help': if hasattr(argv_parsed, 'help_target_command') and argv_parsed.help_target_command is not None: if argv_parsed.help_target_command == 'config': config_parser.print_help() elif argv_parsed.help_target_command == 'rules': rules_parser.print_help() elif argv_parsed.help_target_command == 'register': register_parser.print_help() elif argv_parsed.help_target_command == 'check': check_parser.print_help() elif argv_parsed.help_target_command == 'dissolve': dissolve_parser.print_help() elif argv_parsed.help_target_command == 'status': status_parser.print_help() elif argv_parsed.help_target_command == 'attach-camera': attach_camera_parser.print_help() elif argv_parsed.help_target_command == 'stop-cameras': stop_cameras_parser.print_help() elif argv_parsed.help_target_command == 'addon-cmdsh': addon_cmdsh_parser.print_help() elif argv_parsed.help_target_command == 'addon-vnc': addon_vnc_parser.print_help() elif argv_parsed.help_target_command == 'addon-mistyproxy': addon_mistyproxy_parser.print_help() elif argv_parsed.help_target_command == 'ad': advertise_parser.print_help() elif argv_parsed.help_target_command == 'stop-ad': terminate_parser.print_help() else: argparser.print_help() else: argparser.print_help() return 0 if argv_parsed.verbose: pkglogger.setLevel(logging.DEBUG) if argv_parsed.output_format is not None: output_format = argv_parsed.output_format.lower() if output_format not in ['yaml', 'json']: print('output format unrecognized: {}'.format(argv_parsed.output_format)) return 1 else: output_format = None try: ac = HSAPIClient() except: ac = None if argv_parsed.command == 'status': try: config = get_local_config() except: print('error loading configuration data. 
does it exist?') return 1 if argv_parsed.id_prefix is None: if len(config['wdeployments']) == 0: findings = [WorkspaceInstance.inspect_instance()] else: findings = [] for wd in config['wdeployments']: findings.append(WorkspaceInstance.inspect_instance(wdeployment=wd)) else: findings = [] for m in find_wd(config, argv_parsed.id_prefix, one_or_none=False): findings.append(WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][m])) if output_format == 'json': print(json.dumps(findings)) else: # output_format == 'yaml' print(yaml.dump(findings, default_flow_style=False)) elif argv_parsed.command == 'attach-camera': config, indices, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc wdeployments = [config['wdeployments'][jj]['id'] for jj in indices] local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() if argv_parsed.attach_camera_res: width, height = [int(x) for x in argv_parsed.attach_camera_res.split(',')] if width < 1 or height < 1: print('Width, height must be positive') return 1 else: width, height = None, None if argv_parsed.attach_camera_crop_config: crop = json.loads(argv_parsed.attach_camera_crop_config) else: crop = None if argv_parsed.become_daemon: if os.fork() != 0: return 0 os.close(0) os.close(1) os.close(2) try: camera_main(wdeployments, tok=tok, dev=argv_parsed.camera, width=width, height=height, crop=crop) except ConnectionError: if not argv_parsed.become_daemon: print('ERROR: failed to reach server. Are you connected to the Internet?') return 1 elif argv_parsed.command == 'stop-cameras': local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() try: stop_cameras(tok, allcam=argv_parsed.all_cameras) except ConnectionError: print('ERROR: failed to reach server. Are you connected to the Internet?') return 1 elif argv_parsed.command == 'addon-cmdsh': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc wdeployment_id = config['wdeployments'][index]['id'] local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() try: if argv_parsed.add_addon_cmdsh: add_cmdsh(wdeployment_id, tok) elif argv_parsed.rm_addon_cmdsh: rm_cmdsh(wdeployment_id, tok) else: print('Use `hardshare addon-cmdsh` with a switch.') print('To get a help message, enter\n\n hardshare help addon-cmdsh') return 1 except ValueError as err: print('ERROR: {}'.format(err)) return 1 elif argv_parsed.command == 'addon-vnc': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc wdeployment_id = config['wdeployments'][index]['id'] local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. 
Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() try: if argv_parsed.add_addon_vnc: add_vnc(wdeployment_id, tok) elif argv_parsed.rm_addon_vnc: rm_vnc(wdeployment_id, tok) else: print('Use `hardshare addon-vnc` with a switch.') print('To get a help message, enter\n\n hardshare help addon-vnc') return 1 except ValueError as err: print('ERROR: {}'.format(err)) return 1 elif argv_parsed.command == 'addon-mistyproxy': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc wdeployment_id = config['wdeployments'][index]['id'] local_keys = list_local_keys() if len(local_keys) < 1: print('No valid keys available. Check: `hardshare config -l`') return 1 with open(local_keys[0], 'rt') as fp: tok = fp.read().strip() try: if argv_parsed.add_addon_mistyproxy: if argv_parsed.targetaddr is None: print('--ip is required with --add') return 1 add_mistyproxy(wdeployment_id, tok, argv_parsed.targetaddr) elif argv_parsed.rm_addon_mistyproxy: rm_mistyproxy(wdeployment_id, tok) else: print('Use `hardshare addon-mistyproxy` with a switch.') print('To get a help message, enter\n\n hardshare help addon-mistyproxy') return 1 except ValueError as err: print('ERROR: {}'.format(err)) return 1 elif argv_parsed.command == 'ad': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc if 'ssh_key' not in config or config['ssh_key'] is None: print('WARNING: local configuration does not declare SSH key.\n' 'Instances with connection type sshtun cannot launch.') pkglogger.removeHandler(loghandler) if argv_parsed.become_daemon: if os.fork() != 0: return 0 os.close(0) os.close(1) os.close(2) else: pkglogger.addHandler(logging.StreamHandler()) logfname = 'hardshare_client.{}.log'.format(config['wdeployments'][index]['id']) loghandler = logging.FileHandler(filename=logfname, mode='a', delay=True) loghandler.setLevel(logging.DEBUG) loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});' ' %(asctime)s ; %(message)s' .format(os.getpid()))) pkglogger.addHandler(loghandler) return ac.run_sync(config['wdeployments'][index]['id']) elif argv_parsed.command == 'stop-ad': config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc if argv_parsed.purge_supposed_instance: cprovider = config['wdeployments'][index]['cprovider'] if cprovider == 'proxy': print('--purge not supported for cprovider `proxy`') return 1 elif cprovider not in ['docker', 'podman']: print('unknown cprovider: {}'.format(cprovider)) return 1 findings = WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][index]) if 'container' in findings: try: subprocess.check_call([cprovider, 'rm', '-f', findings['container']['name']], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) except: print('failed to stop container `{}`'.format(findings['container']['name'])) return 1 return 0 else: print('failed to detect local instance') return 1 else: if ac is None: print('cannot terminate without valid API client') return 1 try: ac.terminate(config['wdeployments'][index]['id']) except FileNotFoundError: print('ERROR: cannot reach daemon. Does it exist? 
(Try `hardshare status`)') return 1 return 0 elif argv_parsed.command == 'register': if ac is None: print('cannot register without initial local configuration.' ' (try `hardshare config --create`)') return 1 try: print(ac.register_new(at_most_one=argv_parsed.register_at_most_one)) except HSError as err: print('ERROR: {}'.format(err)) return 1 except ConnectionError: print('ERROR: failed to reach server. Are you connected to the Internet?') return 1 elif argv_parsed.command == 'rules': if ac is None: print('no local configuration found. (try `hardshare config -h`)') return 1 if argv_parsed.id_prefix is None: wdid = None else: try: wdid = str(uuid.UUID(argv_parsed.id_prefix)) except: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: print('The given ID does not appear to be valid.') return 1 wdid = config['wdeployments'][index]['id'] if argv_parsed.list_rules: try: res = ac.get_access_rules(wdid) except Exception as err: print('{}'.format(err)) return 1 if 'err' in res: if res['err'] == 'wrong authorization token': print('wrong API token. Did it expire?') else: print(res['err']) return 1 res['comments'] = [ 'Access is denied unless a rule explicitly permits it.', ] if output_format == 'json': print(json.dumps(res)) else: # output_format == 'yaml' print(yaml.dump(res, default_flow_style=False)) elif argv_parsed.drop_all_rules or argv_parsed.add_rule_permit_me: try: if argv_parsed.drop_all_rules: ac.drop_access_rules(wdid) elif argv_parsed.add_rule_permit_me: ac.add_access_rule(wdid) except Exception as err: print('{}'.format(err)) return 1 elif argv_parsed.add_rule_permit_all: ui_input = None while ui_input not in ('y', 'yes'): print('Do you want to permit access by anyone? [y/N] ', end='') ui_input = input().lower() if ui_input in ('n', 'no', ''): return 1 try: ac.add_access_rule(wdid, to_user='*') except Exception as err: print('{}'.format(err)) return 1 else: print('Use `hardshare rules` with a switch. For example, `hardshare rules -l`') print('or to get a help message, enter\n\n hardshare help rules') return 1 elif argv_parsed.command == 'check': if ac is None: print('no local configuration found. (try `hardshare config -h`)') return 1 try: res = ac.check_registration(argv_parsed.id_prefix) except: print('Error occurred while contacting remote server ' 'at {}'.format(ac.base_uri)) return 1 if 'err' in res: if res['err'] == 'not found': print('not found: workspace deployment with id prefix {}' .format(res['id_prefix'])) elif res['err'] == 'wrong authorization token': print('wrong API token. Did it expire?') else: print(res['err']) return 1 else: print('summary of workspace deployment {}'.format(res['id'])) print('\tcreated: {}'.format(res['date_created'])) print('\torigin (address) of registration: {}'.format(res['origin'])) if 'date_dissolved' in res: print('\tdissolved: {}'.format(res['date_dissolved'])) elif argv_parsed.command == 'dissolve': if ac is None: print('no local configuration found. (try `hardshare config -h`)') return 1 try: wdid = str(uuid.UUID(argv_parsed.wdid)) except: print('The given ID does not appear to be valid.') return 1 ui_input = None while ui_input not in ('y', 'yes'): print(('Do you want to dissolve {}? This action cannot be undone. 
' '[y/N] ').format(wdid), end='') ui_input = input().lower() if ui_input in ('n', 'no', ''): return 1 try: res = ac.dissolve_registration(wdid) except: print('Error occurred while contacting remote server ' 'at {}'.format(ac.base_uri)) return 1 if 'err' in res: if res['err'] == 'not found': print('not found: workspace deployment with id prefix {}' .format(res['id_prefix'])) elif res['err'] == 'wrong authorization token': print('wrong API token. Did it expire?') else: print(res['err']) return 1 # Remove from local configuration, if present rm_wd(get_local_config(), wdid, save=True) elif argv_parsed.command == 'config': if argv_parsed.list_config: try: config = get_local_config(create_if_empty=argv_parsed.create_config, collect_errors=True) except: print('error loading configuration data.' ' does it exist? is it broken?') return 1 if not argv_parsed.only_local_config: # Try to get remote config, given possibly new local config try: assert ac is not None remote_config = ac.get_remote_config(include_dissolved=argv_parsed.include_dissolved) except HSError as err: print('Error: {}'.format(err)) return 1 except: print('Error occurred while contacting rerobots servers') print('Try config -l --local to only get local information') return 1 config = { 'local': config, 'remote': remote_config, } if 'local' in config: ref = config['local']['wdeployments'] else: ref = config['wdeployments'] for jj, wdeployment in enumerate(ref): ref[jj]['url'] = 'https://rerobots.net/workspace/{}'.format(wdeployment['id']) if output_format == 'json': print(json.dumps(config)) elif output_format == 'yaml': print(yaml.dump(config, default_flow_style=False)) else: if 'local' not in config: config = { 'local': config, 'remote': None, } print('workspace deployments defined in local configuration:') if len(config['local']['wdeployments']) == 0: print('\t(none)') else: for wdeployment in config['local']['wdeployments']: print('{}\n\turl: {}\n\towner: {}\n\tcprovider: {}\n\tcargs: {}'.format( wdeployment['id'], wdeployment['url'], wdeployment['owner'], wdeployment['cprovider'], wdeployment['cargs'], )) if wdeployment['cprovider'] in ['docker', 'podman']: print('\timg: {}'.format(wdeployment['image'])) if wdeployment['terminate']: print('\tterminate:') for terminate_p in wdeployment['terminate']: print('\t\t{}'.format(terminate_p)) print('\nfound keys:') if len(config['local']['keys']) == 0: print('\t(none)') else: print('\t' + '\n\t'.join(config['local']['keys'])) if 'err_keys' in config['local'] and len(config['local']['err_keys']) > 0: print('found possible keys with errors:') for err_key_path, err in config['local']['err_keys'].items(): print('\t {}: {}'.format(err, err_key_path)) if config['remote']: if 'err' in config['remote']: print('Error occurred while contacting remote server.') if config['remote']['err'] == 'wrong authorization token': print('wrong API token. 
Did it expire?') else: print(config['remote']['err']) return 1 if len(config['remote']['deployments']) == 0: print('\nno registered workspace deployments with this user account') else: print('\nregistered workspace deployments with this user account:') for wd in config['remote']['deployments']: print('{}'.format(wd['id'])) print('\tcreated: {}'.format(wd['date_created'])) if wd['desc'] is not None: print('\tdesc: {}'.format(wd['desc'])) print('\torigin (address) of registration: {}' .format(wd['origin'])) if wd['dissolved']: print('\tdissolved: {}'.format(wd['dissolved'])) elif argv_parsed.prune_err_keys: _, errored_keys = list_local_keys(collect_errors=True) for err_key_path, err in errored_keys.items(): print('deleting {}...'.format(err_key_path)) os.unlink(err_key_path) elif argv_parsed.new_api_token: try: add_key(argv_parsed.new_api_token) except: print('failed to add key') return 1 elif argv_parsed.new_ssh_path: try: add_ssh_path(argv_parsed.new_ssh_path) except: print('ERROR: {} or {} does not exist or ' 'has the wrong permissions.'.format( argv_parsed.new_ssh_path, argv_parsed.new_ssh_path + '.pub' )) return 1 elif argv_parsed.create_config: get_local_config(create_if_empty=True) elif argv_parsed.declared_wdeployment_id is not None: assert ac is not None ac.declare_existing(argv_parsed.declared_wdeployment_id) ac.sync_config() elif argv_parsed.raw_device_path is not None: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc cprovider = config['wdeployments'][index]['cprovider'] if cprovider == 'proxy': print('--add-raw-device not supported for cprovider `proxy`') return 1 elif cprovider not in ['docker', 'podman']: print('unknown cprovider: {}'.format(cprovider)) return 1 if not os.path.exists(argv_parsed.raw_device_path): print('ERROR: given device file does not exist') return 1 carg = '--device={D}:{D}'.format(D=argv_parsed.raw_device_path) config['wdeployments'][index]['cargs'].append(carg) modify_local(config) elif argv_parsed.remove_raw_device_path is not None: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc carg = '--device={D}:{D}'.format(D=argv_parsed.remove_raw_device_path) config['wdeployments'][index]['cargs'].remove(carg) modify_local(config) elif argv_parsed.add_init_inside is not None: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc cprovider = config['wdeployments'][index]['cprovider'] if cprovider == 'proxy': print('--add-init-inside not supported for cprovider `proxy`') return 1 elif cprovider not in ['docker', 'podman']: print('unknown cprovider: {}'.format(cprovider)) return 1 config['wdeployments'][index]['init_inside'].append(argv_parsed.add_init_inside) modify_local(config) elif argv_parsed.rm_init_inside: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc cprovider = config['wdeployments'][index]['cprovider'] if cprovider == 'proxy': print('--rm-init-inside not supported for cprovider `proxy`') return 1 elif cprovider not in ['docker', 'podman']: print('unknown cprovider: {}'.format(cprovider)) return 1 config['wdeployments'][index]['init_inside'] = [] modify_local(config) elif argv_parsed.cprovider is not None: selected_cprovider = argv_parsed.cprovider.lower() if selected_cprovider not in ['docker', 'podman', 'proxy']: print('ERROR: cprovider must be one of the following: docker, podman, proxy') return 1 config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc 
config['wdeployments'][index]['cprovider'] = selected_cprovider if selected_cprovider == 'proxy': config['wdeployments'][index]['image'] = None else: # selected_cprovider \in {docker, podman} if config['wdeployments'][index]['image'] is None: config['wdeployments'][index]['image'] = 'rerobots/hs-generic' modify_local(config) elif argv_parsed.cprovider_img is not None: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc cprovider = config['wdeployments'][index]['cprovider'] if cprovider not in ['docker', 'podman', 'proxy']: print('unknown cprovider: {}'.format(cprovider)) return 1 if cprovider == 'podman': cp_images = subprocess.run([cprovider, 'image', 'exists', argv_parsed.cprovider_img]) if cp_images.returncode != 0: print('ERROR: given image name is not recognized by cprovider') return 1 elif cprovider == 'docker': cp_images = subprocess.run([cprovider, 'image', 'inspect', argv_parsed.cprovider_img], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) if cp_images.returncode != 0: print('ERROR: given image name is not recognized by cprovider') return 1 else: # cprovider == 'proxy' print('ERROR: --assign-image not supported for cprovider `proxy`') return 1 config['wdeployments'][index]['image'] = argv_parsed.cprovider_img modify_local(config) elif argv_parsed.add_terminate_prog is not None: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc normalized_path = os.path.abspath(argv_parsed.add_terminate_prog) if not os.path.exists(normalized_path): print('ERROR: given path does not exist') return 1 config['wdeployments'][index]['terminate'].append(normalized_path) modify_local(config) elif argv_parsed.rm_terminate_prog is not None: config, index, rc = get_config_with_index(argv_parsed.id_prefix) if rc != 0: return rc config['wdeployments'][index]['terminate'].remove(argv_parsed.rm_terminate_prog) modify_local(config) else: print('Use `hardshare config` with a switch. For example, `hardshare config -l`') print('or to get a help message, enter\n\n hardshare help config') return 1 return 0 if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
[((77, 16, 77, 46), 'logging.getLogger', 'logging.getLogger', ({(77, 34, 77, 45): '"""hardshare"""'}, {}), "('hardshare')", False, 'import logging\n'), ((79, 17, 79, 107), 'logging.handlers.WatchedFileHandler', 'logging.handlers.WatchedFileHandler', (), '', False, 'import logging\n'), ((88, 16, 89, 98), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((83, 54, 83, 65), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((430, 18, 430, 38), 'json.dumps', 'json.dumps', ({(430, 29, 430, 37): 'findings'}, {}), '(findings)', False, 'import json\n'), ((432, 18, 432, 63), 'yaml.dump', 'yaml.dump', (), '', False, 'import yaml\n'), ((457, 19, 457, 68), 'json.loads', 'json.loads', ({(457, 30, 457, 67): 'argv_parsed.attach_camera_crop_config'}, {}), '(argv_parsed.attach_camera_crop_config)', False, 'import json\n'), ((464, 12, 464, 23), 'os.close', 'os.close', ({(464, 21, 464, 22): '(0)'}, {}), '(0)', False, 'import os\n'), ((465, 12, 465, 23), 'os.close', 'os.close', ({(465, 21, 465, 22): '(1)'}, {}), '(1)', False, 'import os\n'), ((466, 12, 466, 23), 'os.close', 'os.close', ({(466, 21, 466, 22): '(2)'}, {}), '(2)', False, 'import os\n'), ((462, 15, 462, 24), 'os.fork', 'os.fork', ({}, {}), '()', False, 'import os\n'), ((611, 21, 611, 81), 'logging.FileHandler', 'logging.FileHandler', (), '', False, 'import logging\n'), ((605, 12, 605, 23), 'os.close', 'os.close', ({(605, 21, 605, 22): '(0)'}, {}), '(0)', False, 'import os\n'), ((606, 12, 606, 23), 'os.close', 'os.close', ({(606, 21, 606, 22): '(1)'}, {}), '(1)', False, 'import os\n'), ((607, 12, 607, 23), 'os.close', 'os.close', ({(607, 21, 607, 22): '(2)'}, {}), '(2)', False, 'import os\n'), ((603, 15, 603, 24), 'os.fork', 'os.fork', ({}, {}), '()', False, 'import os\n'), ((609, 33, 609, 56), 'logging.StreamHandler', 'logging.StreamHandler', ({}, {}), '()', False, 'import logging\n'), ((615, 58, 615, 69), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((634, 20, 637, 68), 'subprocess.check_call', 'subprocess.check_call', (), '', False, 'import subprocess\n'), ((680, 27, 680, 59), 'uuid.UUID', 'uuid.UUID', ({(680, 37, 680, 58): 'argv_parsed.id_prefix'}, {}), '(argv_parsed.id_prefix)', False, 'import uuid\n'), ((706, 22, 706, 37), 'json.dumps', 'json.dumps', ({(706, 33, 706, 36): 'res'}, {}), '(res)', False, 'import json\n'), ((708, 22, 708, 62), 'yaml.dump', 'yaml.dump', (), '', False, 'import yaml\n'), ((772, 23, 772, 50), 'uuid.UUID', 'uuid.UUID', ({(772, 33, 772, 49): 'argv_parsed.wdid'}, {}), '(argv_parsed.wdid)', False, 'import uuid\n'), ((841, 22, 841, 40), 'json.dumps', 'json.dumps', ({(841, 33, 841, 39): 'config'}, {}), '(config)', False, 'import json\n'), ((907, 16, 907, 39), 'os.unlink', 'os.unlink', ({(907, 26, 907, 38): 'err_key_path'}, {}), '(err_key_path)', False, 'import os\n'), ((844, 22, 844, 65), 'yaml.dump', 'yaml.dump', (), '', False, 'import yaml\n'), ((946, 19, 946, 62), 'os.path.exists', 'os.path.exists', ({(946, 34, 946, 61): 'argv_parsed.raw_device_path'}, {}), '(argv_parsed.raw_device_path)', False, 'import os\n'), ((1017, 28, 1017, 101), 'subprocess.run', 'subprocess.run', ({(1017, 43, 1017, 100): "[cprovider, 'image', 'exists', argv_parsed.cprovider_img]"}, {}), "([cprovider, 'image', 'exists', argv_parsed.cprovider_img])", False, 'import subprocess\n'), ((1040, 30, 1040, 77), 'os.path.abspath', 'os.path.abspath', ({(1040, 46, 1040, 76): 'argv_parsed.add_terminate_prog'}, {}), '(argv_parsed.add_terminate_prog)', False, 'import os\n'), ((1022, 28, 
1024, 69), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((1042, 19, 1042, 50), 'os.path.exists', 'os.path.exists', ({(1042, 34, 1042, 49): 'normalized_path'}, {}), '(normalized_path)', False, 'import os\n')]
kkcookies99/UAST
Dataset/Leetcode/train/7/93.py
fff81885aa07901786141a71e5600a08d7cb4868
class Solution:
    def XXX(self, x: int) -> int:
        def solve(x):
            a = list(map(int, str(x)))
            p = {}
            d = 0
            for ind, val in enumerate(a):
                p[ind] = val
            for i, v in p.items():
                d += v*(10**i)
            if (2**31 - 1 >= d >= -(2**31)):
                return d
            else:
                return 0
        if x >= 0:
            return (solve(x))
        if x < 0:
            x = -x
            return (-solve(x))
[]
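The dataset appears to mask the solution's method name as `XXX`; the path (`Leetcode/train/7`) and the 32-bit bound check identify it as LeetCode 7, Reverse Integer: reverse the decimal digits of `x` and return 0 on signed 32-bit overflow. A more compact equivalent, assuming that reading of the problem:

def reverse_integer(x: int) -> int:
    sign = -1 if x < 0 else 1
    reversed_digits = sign * int(str(abs(x))[::-1])
    # LeetCode 7 requires 0 when the reversed value leaves the signed 32-bit range.
    return reversed_digits if -(2**31) <= reversed_digits <= 2**31 - 1 else 0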
JenBanks8585/Labs_CitySpireDS
app/realty.py
4755bd5ce718ee2f65f6a53a5918bd0cf18b2ddf
"""Realty Info""" import os import requests from dotenv import load_dotenv from fastapi import APIRouter, Depends import sqlalchemy from pydantic import BaseModel, SecretStr from app import config from app.walk_score import * load_dotenv() router = APIRouter() headers = {'x-rapidapi-key': os.getenv('api_key'), 'x-rapidapi-host': os.getenv('host') } @router.get('/streamlined_rent_list') async def streamlined_rent_list(api_key = config.settings.api_key, city: str = "New York City", state: str= "NY", prop_type: str = "condo", limit: int = 4): """ Parameters: api_key city: str state: str prop_type: str ('condo', 'single_family', 'multi_family') limit: int number of results to populate Returns: information about properties for rent """ url = os.getenv('url_list_for_rent') querystring = {"city": city, "state_code": state, "limit": limit, "offset": "0", "sort":"relevance", "prop_type": prop_type} response_for_rent = requests.request("GET", url, params = querystring, headers = headers,) response = response_for_rent.json()['properties'] rental_list = [] for i in range(limit): line = response[i]['address']['line'] city = response[i]['address']['city'] state = response[i]['address']['state'] lat = response[i]['address']['lat'] lon = response[i]['address']['lon'] photos = response[i]['photos'] address = line +" "+ city + " "+ state walk_score = just_walk_score(address, lat, lon) element = {'address': address, 'lat': lat, 'lon': lon, 'city':city, 'state':state, 'photos': photos, 'walk_score': walk_score} rental_list.append(element) return rental_list @router.get('/for_rent_list') async def for_rent_list(api_key = config.settings.api_key, city: str = "New York City", state: str= "NY", prop_type: str = "condo", limit: int = 4): """ Parameters: api_key city: str state: str prop_type: str ('condo', 'single_family', 'multi_family') limit: int number of results to populate Returns: information about properties for rent """ url = os.getenv('url_list_for_rent') querystring = {"city": city, "state_code": state, "limit": limit, "offset": "0", "sort":"relevance", "prop_type": prop_type} response_for_rent = requests.request("GET", url, params = querystring, headers = headers,) return response_for_rent.json()['properties'] @router.get('/for_rent_list/{property_id}') async def property_detail(property_id: str = "O3599084026"): """ Parameters: property_id Returns: detailed information about the property """ url = os.getenv('url_property_detail') querystring = {"property_id":property_id} response_prop_detail = requests.request("GET", url, headers=headers, params=querystring) return response_prop_detail.json()['properties'] @router.get('/for_sale_list') async def for_sale_list(api_key = config.settings.api_key, city = "New York City", state= "NY", limit = 4): url = os.getenv('url_list_for_sale') querystring = {"city": city ,"limit": limit,"offset":"0","state_code": state,"sort":"relevance"} response_for_sale = requests.request("GET", url, headers=headers, params=querystring) return response_for_sale.json()['properties']
[((14, 0, 14, 13), 'dotenv.load_dotenv', 'load_dotenv', ({}, {}), '()', False, 'from dotenv import load_dotenv\n'), ((16, 9, 16, 20), 'fastapi.APIRouter', 'APIRouter', ({}, {}), '()', False, 'from fastapi import APIRouter, Depends\n'), ((18, 29, 18, 49), 'os.getenv', 'os.getenv', ({(18, 39, 18, 48): '"""api_key"""'}, {}), "('api_key')", False, 'import os\n'), ((19, 30, 19, 47), 'os.getenv', 'os.getenv', ({(19, 40, 19, 46): '"""host"""'}, {}), "('host')", False, 'import os\n'), ((40, 10, 40, 40), 'os.getenv', 'os.getenv', ({(40, 20, 40, 39): '"""url_list_for_rent"""'}, {}), "('url_list_for_rent')", False, 'import os\n'), ((48, 24, 48, 94), 'requests.request', 'requests.request', (), '', False, 'import requests\n'), ((92, 10, 92, 40), 'os.getenv', 'os.getenv', ({(92, 20, 92, 39): '"""url_list_for_rent"""'}, {}), "('url_list_for_rent')", False, 'import os\n'), ((100, 24, 100, 94), 'requests.request', 'requests.request', (), '', False, 'import requests\n'), ((112, 10, 112, 42), 'os.getenv', 'os.getenv', ({(112, 20, 112, 41): '"""url_property_detail"""'}, {}), "('url_property_detail')", False, 'import os\n'), ((115, 27, 115, 92), 'requests.request', 'requests.request', (), '', False, 'import requests\n'), ((126, 10, 126, 40), 'os.getenv', 'os.getenv', ({(126, 20, 126, 39): '"""url_list_for_sale"""'}, {}), "('url_list_for_sale')", False, 'import os\n'), ((129, 24, 129, 89), 'requests.request', 'requests.request', (), '', False, 'import requests\n')]
v0rts/docker-weewx
dist/weewx-4.0.0b3/bin/weewx/junk2.py
70b2f252051dfead4fcb74e74662b297831e6342
from __future__ import print_function import time import weeutil.weeutil import weewx.manager import weewx.xtypes archive_sqlite = {'database_name': '/home/weewx/archive/weepwr.sdb', 'driver': 'weedb.sqlite'} archive_mysql = {'database_name': 'weewx', 'user': 'weewx', 'password': 'weewx', 'driver': 'weedb.mysql'} sql_str = "SELECT %s(%s), MIN(usUnits), MAX(usUnits) FROM %s " \ "WHERE dateTime > ? AND dateTime <= ?" % ('avg', 'outTemp', 'archive') timespan = weeutil.weeutil.TimeSpan(1573245000, 1573246800) timespan = weeutil.weeutil.TimeSpan(1573245000, 1573245000 + 600) print('timespan=', timespan) with weewx.manager.Manager.open(archive_sqlite) as db_manager: interpolate_dict = { 'aggregate_type': 'diff', 'obs_type': 'ch8_a_energy2', 'table_name': db_manager.table_name, 'start': timespan.start, 'stop': timespan.stop, } SQL_TEMPLATE = "SELECT (ch8_a_energy2 - (SELECT ch8_a_energy2 FROM archive WHERE dateTime=%(start)s)) / (%(stop)s - %(start)s) FROM archive WHERE dateTime=%(stop)s;" SQL_TEMPLATE = """Select a.dateTime as StartTime , b.dateTime as EndTime , b.dateTime-a.dateTime as TimeChange , b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange FROM archive a Inner Join archive b ON b.dateTime>=1573245000 AND b.dateTime<=(1573245000 + 600)""" SQL_TEMPLATE = """Select a.dateTime as StartTime, b.datetime as EndTime, b.dateTime-a.dateTime as TimeChange, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange FROM archive a, archive b WHERE b.dateTime = (Select MAX(c.dateTime) FROM archive c WHERE c.dateTime<=(1573245000+600)) AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime>=1573245000);""" SQL_TEMPLATE = """Select a.dateTime as StartTime, b.datetime as EndTime, b.dateTime-a.dateTime as TimeChange, b.ch8_a_energy2-a.ch8_a_energy2 as ValueChange FROM archive a, archive b WHERE b.dateTime = (Select MAX(dateTime) FROM archive WHERE dateTime<=(1573245000+600)) AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime>=1573245000);""" SQL_TEMPLATE = "SELECT (b.%(obs_type)s - a.%(obs_type)s) / (b.dateTime-a.dateTime) "\ "FROM archive a, archive b "\ "WHERE b.dateTime = (SELECT MAX(dateTime) FROM archive WHERE dateTime <= %(stop)s) "\ "AND a.dateTime = (SELECT MIN(dateTime) FROM archive WHERE dateTime >= %(start)s);" sql_stmt = SQL_TEMPLATE % interpolate_dict print(sql_stmt) # Get the number of records with db_manager.connection.cursor() as cursor: for row in cursor.execute(sql_stmt): print(row)
[]
a6502/fast_lemon_api
fast_lemon_api_test.py
09a5b6eec3e84d1d006f927e502a7071a28739cc
#!/usr/bin/env pytest-3 from fastapi.testclient import TestClient from fast_lemon_api import app client = TestClient(app) def test_get_root(): response = client.get("/") assert response.status_code == 200 assert response.text == "Welcome to the fast-lemon-api!\n" neworder = { "isin": "blablablabla", "limit_price": 0.2, "side": "buy", "quantity": 1, "valid_until": 1996943663, "status": "open" } order_id = None def test_post_orders1(): response = client.post('/orders/', json={ "isin": "blablablabla", "limit_price": 0.2, "side": "buy", "quantity": 1, "valid_until": 1996943663, }) assert response.status_code == 201 j = response.json() #print(repr(j)) order_id = j.pop('uuid') assert j == neworder #assert 0 def test_post_orders2(): response = client.post('/orders/', json={ "isin": "blablabla", "limit_price": 0.2, "side": "buy", "quantity": 1, "valid_until": 1996950863 }) assert response.status_code == 422 assert response.json() == { 'detail': [{ 'loc': ['body', 'isin'], 'msg': 'ensure this value has at least 12 characters', 'type': 'value_error.any_str.min_length', 'ctx': { 'limit_value': 12 } }] } def test_post_orders3(): response = client.post('/orders/', json={ "isin": "blablablablabla", "limit_price": 0.2, "side": "buy", "quantity": 1, "valid_until": 1996950863 }) assert response.status_code == 422 assert response.json() == { 'detail': [{ 'ctx': { 'limit_value': 12 }, 'loc': ['body', 'isin'], 'msg': 'ensure this value has at most 12 characters', 'type': 'value_error.any_str.max_length' }] } def test_post_orders4(): response = client.post('/orders/', json={ "isin": "blablablabla", "limit_price": -1, "side": "buy", "quantity": 1, "valid_until": 1996950863 }) assert response.status_code == 422 assert response.json() == { 'detail': [{ 'ctx': { 'limit_value': 0 }, 'loc': ['body', 'limit_price'], 'msg': 'ensure this value is greater than 0', 'type': 'value_error.number.not_gt' }] } def test_post_orders5(): response = client.post('/orders/', json={ "isin": "blablablabla", "limit_price": 0.2, "side": "BUY!", "quantity": 1, "valid_until": 1996950863 }) assert response.status_code == 422 assert response.json() == { 'detail': [{ 'ctx': { 'enum_values': ['buy', 'sell'] }, 'loc': ['body', 'side'], 'msg': "value is not a valid enumeration member; permitted: 'buy', 'sell'", 'type': 'type_error.enum' }] } def test_post_orders6(): response = client.post('/orders/', json={ "isin": "blablablabla", "limit_price": 0.33333, "side": "SELL", "quantity": 0, "valid_until": 1996950863 }) assert response.status_code == 422 assert response.json() == { 'detail': [{ 'ctx': { 'limit_value': 0 }, 'loc': ['body', 'quantity'], 'msg': 'ensure this value is greater than 0', 'type': 'value_error.number.not_gt' }] } def test_post_orders8(): response = client.post('/orders/', json={ "isin": "blablablabla", "limit_price": 0.2, "side": "SELL", "quantity": 1.1, "valid_until": 1996950863 }) assert response.status_code == 422 assert response.json() == { 'detail': [{ 'loc': ['body', 'quantity'], 'msg': 'value is not a valid integer', 'type': 'type_error.integer' }] } def test_post_orders7(): response = client.post('/orders/', json={ "isin": "blablablabla", "limit_price": 0.2, "side": "SELL", "quantity": 2, "valid_until": 1996 }) assert response.status_code == 422 assert response.json() == { 'detail': [{ 'loc': ['body', 'valid_until'], 'msg': 'valid_until cannot be in the past', 'type': 'value_error' }] }
[((7, 9, 7, 24), 'fastapi.testclient.TestClient', 'TestClient', ({(7, 20, 7, 23): 'app'}, {}), '(app)', False, 'from fastapi.testclient import TestClient\n')]
andrewkozlik/pam-u2f
tests/regenerate_credentials.py
5b504783c9af972c790bdcb506867bad7df5e333
#!/bin/python2 import collections import re import subprocess import sys PUC = "../pamu2fcfg/pamu2fcfg" resident = ["", "-r"] presence = ["", "-P"] pin = ["", "-N"] verification = ["", "-V"] Credential = collections.namedtuple("Credential", "keyhandle pubkey attributes oldformat") sshformat = 0 def print_test_case(filename, sshformat, credentials): start = """ cfg.auth_file = "{authfile}"; cfg.sshformat = {ssh}; rc = get_devices_from_authfile(&cfg, username, dev, &n_devs); assert(rc == 1); assert(n_devs == {devices}); """ checks = """ assert(strcmp(dev[{i}].coseType, "es256") == 0); assert(strcmp(dev[{i}].keyHandle, "{kh}") == 0); assert(strcmp(dev[{i}].publicKey, "{pk}") == 0); assert(strcmp(dev[{i}].attributes, "{attr}") == 0); assert(dev[{i}].old_format == {old}); """ free = """ free(dev[{i}].coseType); free(dev[{i}].attributes); free(dev[{i}].keyHandle); free(dev[{i}].publicKey); """ end = """ memset(dev, 0, sizeof(dev_t) * {devices}); """ code = "" free_block = "" code += start.format(authfile = filename, ssh = sshformat, devices = len(credentials)) for c, v in enumerate(credentials): code += checks.format(i = c, kh = v.keyhandle, pk = v.pubkey, attr = v.attributes, old = v.oldformat) free_block += free.format(i = c) code += free_block + end.format(devices = len(credentials)) print(code) # Single credentials print >> sys.stderr, "Generating single credentials" for r in resident: for p in presence: for n in pin: for v in verification: filename = "credentials/new_" + r + p + v + n print >> sys.stderr, "Generating " + filename + ".templ" line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n]) matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M) with open(filename + ".templ", "w") as outfile: outfile.write(line) credentials = [Credential(keyhandle = matches.group(1), pubkey = matches.group(2), attributes = matches.group(3), oldformat = 0)] print_test_case(filename + ".cred", sshformat, credentials) # Double credentials print >> sys.stderr, "Generating double credentials" for r in resident: for p in presence: for n in pin: for v in verification: filename = "credentials/new_double_" + r + p + v + n print >> sys.stderr, "Generating " + filename + ".templ" line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n]) matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M) with open(filename + ".templ", "w") as outfile: outfile.write(line) credentials = [Credential(keyhandle = matches.group(1), pubkey = matches.group(2), attributes = matches.group(3), oldformat = 0)] line = subprocess.check_output([PUC, "-n", r, p, v, n]) matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M) with open(filename + ".templ", "a") as outfile: outfile.write(line) credentials += [Credential(keyhandle = matches.group(1), pubkey = matches.group(2), attributes = matches.group(3), oldformat = 0)] print_test_case(filename + ".cred", sshformat, credentials) # Mixed credentials print >> sys.stderr, "Mixed double credentials" options = [("", ""), ("", "-P"), ("-P", ""), ("-P", "-P")] for p1, p2 in options: filename = "credentials/new_mixed_" + p1 +"1" + p2 + "2" print >> sys.stderr, "Generating " + filename + ".templ" line = subprocess.check_output([PUC, "-u@USERNAME@", p1]) matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M) with open(filename + ".templ", "w") as outfile: outfile.write(line) credentials = [Credential(keyhandle = matches.group(1), pubkey = matches.group(2), attributes = matches.group(3), oldformat = 0)] line = subprocess.check_output([PUC, "-n", p2]) 
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M) with open(filename + ".templ", "a") as outfile: outfile.write(line) credentials += [Credential(keyhandle = matches.group(1), pubkey = matches.group(2), attributes = matches.group(3), oldformat = 0)] print_test_case(filename + ".cred", sshformat, credentials)
[((18, 13, 18, 90), 'collections.namedtuple', 'collections.namedtuple', ({(18, 36, 18, 48): '"""Credential"""', (18, 50, 18, 89): '"""keyhandle pubkey attributes oldformat"""'}, {}), "('Credential', 'keyhandle pubkey attributes oldformat')", False, 'import collections\n'), ((124, 11, 124, 61), 'subprocess.check_output', 'subprocess.check_output', ({(124, 35, 124, 60): "[PUC, '-u@USERNAME@', p1]"}, {}), "([PUC, '-u@USERNAME@', p1])", False, 'import subprocess\n'), ((126, 14, 126, 66), 're.match', 're.match', ({(126, 23, 126, 53): '"""^.*?:(.*?),(.*?),es256,(.*)"""', (126, 55, 126, 59): 'line', (126, 61, 126, 65): 're.M'}, {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)", False, 'import re\n'), ((134, 11, 134, 51), 'subprocess.check_output', 'subprocess.check_output', ({(134, 35, 134, 50): "[PUC, '-n', p2]"}, {}), "([PUC, '-n', p2])", False, 'import subprocess\n'), ((136, 14, 136, 66), 're.match', 're.match', ({(136, 23, 136, 53): '"""^.*?:(.*?),(.*?),es256,(.*)"""', (136, 55, 136, 59): 'line', (136, 61, 136, 65): 're.M'}, {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)", False, 'import re\n'), ((72, 23, 72, 81), 'subprocess.check_output', 'subprocess.check_output', ({(72, 47, 72, 80): "[PUC, '-u@USERNAME@', r, p, v, n]"}, {}), "([PUC, '-u@USERNAME@', r, p, v, n])", False, 'import subprocess\n'), ((74, 26, 74, 78), 're.match', 're.match', ({(74, 35, 74, 65): '"""^.*?:(.*?),(.*?),es256,(.*)"""', (74, 67, 74, 71): 'line', (74, 73, 74, 77): 're.M'}, {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)", False, 'import re\n'), ((94, 23, 94, 81), 'subprocess.check_output', 'subprocess.check_output', ({(94, 47, 94, 80): "[PUC, '-u@USERNAME@', r, p, v, n]"}, {}), "([PUC, '-u@USERNAME@', r, p, v, n])", False, 'import subprocess\n'), ((96, 26, 96, 78), 're.match', 're.match', ({(96, 35, 96, 65): '"""^.*?:(.*?),(.*?),es256,(.*)"""', (96, 67, 96, 71): 'line', (96, 73, 96, 77): 're.M'}, {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)", False, 'import re\n'), ((104, 23, 104, 71), 'subprocess.check_output', 'subprocess.check_output', ({(104, 47, 104, 70): "[PUC, '-n', r, p, v, n]"}, {}), "([PUC, '-n', r, p, v, n])", False, 'import subprocess\n'), ((106, 26, 106, 78), 're.match', 're.match', ({(106, 35, 106, 65): '"""^.*?:(.*?),(.*?),es256,(.*)"""', (106, 67, 106, 71): 'line', (106, 73, 106, 77): 're.M'}, {}), "('^.*?:(.*?),(.*?),es256,(.*)', line, re.M)", False, 'import re\n')]
zhangyujing/tensorflow
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Affine Scalar Tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency from tensorflow.python.platform import test class AffineScalarBijectorTest(test.TestCase): """Tests correctness of the Y = scale @ x + shift transformation.""" def testProperties(self): with self.test_session(): mu = -1. # scale corresponds to 1. bijector = AffineScalar(shift=mu) self.assertEqual("affine_scalar", bijector.name) def testNoBatchScalar(self): with self.test_session() as sess: def static_run(fun, x): return fun(x).eval() def dynamic_run(fun, x_value): x_value = np.array(x_value) x = array_ops.placeholder(dtypes.float32, name="x") return sess.run(fun(x), feed_dict={x: x_value}) for run in (static_run, dynamic_run): mu = -1. # Corresponds to scale = 2 bijector = AffineScalar(shift=mu, scale=2.) x = [1., 2, 3] # Three scalar samples (no batches). self.assertAllClose([1., 3, 5], run(bijector.forward, x)) self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x)) self.assertAllClose([-np.log(2.)] * 3, run(bijector.inverse_log_det_jacobian, x)) def testOneBatchScalarViaIdentityIn64BitUserProvidesShiftOnly(self): with self.test_session() as sess: def static_run(fun, x): return fun(x).eval() def dynamic_run(fun, x_value): x_value = np.array(x_value).astype(np.float64) x = array_ops.placeholder(dtypes.float64, name="x") return sess.run(fun(x), feed_dict={x: x_value}) for run in (static_run, dynamic_run): mu = np.float64([1.]) # One batch, scalar. # Corresponds to scale = 1. bijector = AffineScalar(shift=mu) x = np.float64([1.]) # One sample from one batches. self.assertAllClose([2.], run(bijector.forward, x)) self.assertAllClose([0.], run(bijector.inverse, x)) self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x)) def testOneBatchScalarViaIdentityIn64BitUserProvidesScaleOnly(self): with self.test_session() as sess: def static_run(fun, x): return fun(x).eval() def dynamic_run(fun, x_value): x_value = np.array(x_value).astype(np.float64) x = array_ops.placeholder(dtypes.float64, name="x") return sess.run(fun(x), feed_dict={x: x_value}) for run in (static_run, dynamic_run): multiplier = np.float64([2.]) # One batch, scalar. # Corresponds to scale = 2, shift = 0. bijector = AffineScalar(scale=multiplier) x = np.float64([1.]) # One sample from one batches. 
self.assertAllClose([2.], run(bijector.forward, x)) self.assertAllClose([0.5], run(bijector.inverse, x)) self.assertAllClose([np.log(0.5)], run(bijector.inverse_log_det_jacobian, x)) def testTwoBatchScalarIdentityViaIdentity(self): with self.test_session() as sess: def static_run(fun, x): return fun(x).eval() def dynamic_run(fun, x_value): x_value = np.array(x_value) x = array_ops.placeholder(dtypes.float32, name="x") return sess.run(fun(x), feed_dict={x: x_value}) for run in (static_run, dynamic_run): mu = [1., -1] # Univariate, two batches. # Corresponds to scale = 1. bijector = AffineScalar(shift=mu) x = [1., 1] # One sample from each of two batches. self.assertAllClose([2., 0], run(bijector.forward, x)) self.assertAllClose([0., 2], run(bijector.inverse, x)) self.assertAllClose([0., 0.], run(bijector.inverse_log_det_jacobian, x)) def testTwoBatchScalarIdentityViaScale(self): with self.test_session() as sess: def static_run(fun, x): return fun(x).eval() def dynamic_run(fun, x_value): x_value = np.array(x_value) x = array_ops.placeholder(dtypes.float32, name="x") return sess.run(fun(x), feed_dict={x: x_value}) for run in (static_run, dynamic_run): mu = [1., -1] # Univariate, two batches. # Corresponds to scale = 1. bijector = AffineScalar(shift=mu, scale=[2., 1]) x = [1., 1] # One sample from each of two batches. self.assertAllClose([3., 0], run(bijector.forward, x)) self.assertAllClose([0., 2], run(bijector.inverse, x)) self.assertAllClose( [-np.log(2), 0.], run(bijector.inverse_log_det_jacobian, x)) def testScalarCongruency(self): with self.test_session(): bijector = AffineScalar(shift=3.6, scale=0.42) assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.) if __name__ == "__main__": test.main()
[((153, 2, 153, 13), 'tensorflow.python.platform.test.main', 'test.main', ({}, {}), '()', False, 'from tensorflow.python.platform import test\n'), ((37, 17, 37, 39), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', (), '', False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((149, 17, 149, 52), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', (), '', False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((150, 6, 150, 65), 'tensorflow.python.ops.distributions.bijector_test_util.assert_scalar_congruency', 'assert_scalar_congruency', (), '', False, 'from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency\n'), ((47, 18, 47, 35), 'numpy.array', 'np.array', ({(47, 27, 47, 34): 'x_value'}, {}), '(x_value)', True, 'import numpy as np\n'), ((48, 12, 48, 59), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (), '', False, 'from tensorflow.python.ops import array_ops\n'), ((54, 19, 54, 51), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', (), '', False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((69, 12, 69, 59), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (), '', False, 'from tensorflow.python.ops import array_ops\n'), ((73, 13, 73, 29), 'numpy.float64', 'np.float64', ({(73, 24, 73, 28): '[1.0]'}, {}), '([1.0])', True, 'import numpy as np\n'), ((76, 19, 76, 41), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', (), '', False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((77, 12, 77, 28), 'numpy.float64', 'np.float64', ({(77, 23, 77, 27): '[1.0]'}, {}), '([1.0])', True, 'import numpy as np\n'), ((90, 12, 90, 59), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (), '', False, 'from tensorflow.python.ops import array_ops\n'), ((94, 21, 94, 37), 'numpy.float64', 'np.float64', ({(94, 32, 94, 36): '[2.0]'}, {}), '([2.0])', True, 'import numpy as np\n'), ((97, 19, 97, 49), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', (), '', False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((98, 12, 98, 28), 'numpy.float64', 'np.float64', ({(98, 23, 98, 27): '[1.0]'}, {}), '([1.0])', True, 'import numpy as np\n'), ((111, 18, 111, 35), 'numpy.array', 'np.array', ({(111, 27, 111, 34): 'x_value'}, {}), '(x_value)', True, 'import numpy as np\n'), ((112, 12, 112, 59), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (), '', False, 'from tensorflow.python.ops import array_ops\n'), ((119, 19, 119, 41), 'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', (), '', False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((132, 18, 132, 35), 'numpy.array', 'np.array', ({(132, 27, 132, 34): 'x_value'}, {}), '(x_value)', True, 'import numpy as np\n'), ((133, 12, 133, 59), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (), '', False, 'from tensorflow.python.ops import array_ops\n'), ((140, 19, 140, 56), 
'tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar.AffineScalar', 'AffineScalar', (), '', False, 'from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar\n'), ((68, 18, 68, 35), 'numpy.array', 'np.array', ({(68, 27, 68, 34): 'x_value'}, {}), '(x_value)', True, 'import numpy as np\n'), ((89, 18, 89, 35), 'numpy.array', 'np.array', ({(89, 27, 89, 34): 'x_value'}, {}), '(x_value)', True, 'import numpy as np\n'), ((101, 29, 101, 40), 'numpy.log', 'np.log', ({(101, 36, 101, 39): '(0.5)'}, {}), '(0.5)', True, 'import numpy as np\n'), ((145, 14, 145, 23), 'numpy.log', 'np.log', ({(145, 21, 145, 22): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((58, 30, 58, 40), 'numpy.log', 'np.log', ({(58, 37, 58, 39): '(2.0)'}, {}), '(2.0)', True, 'import numpy as np\n')]
bricerisingalgorand/mule
mule/util/algorand_util.py
721b73f691076e5c3e2ebb8a79313da486fb0f96
import os import subprocess import json import urllib.request from mule.util import os_util from mule.util import file_util from mule.util import time_util from mule.util import s3_util from mule.util import semver_util import platform def build_algo_release_url(package_type, channel, os_type, cpu_arch_type, package_version): return f"https://algorand-releases.s3.amazonaws.com/channel/{channel}/{package_type}_{channel}_{os_type}-{cpu_arch_type}_{package_version}.tar.gz" def get_latest_package_version(package_type, channel, os_type, cpu_arch_type): os_type = os_util.get_os_type() cpu_arch_type = os_util.get_cpu_arch_type() package_keys = list(s3_util.get_matching_s3_keys( 'algorand-releases', f"channel/{channel}/{package_type}_{channel}_{os_type}-{cpu_arch_type}_", 'tar.gz', s3_auth=False )) package_versions = list(map(semver_util.parse_version, package_keys)) latest_version = semver_util.get_highest_version(package_versions) print(f"Found latest version of package type {package_type} for channel {channel}: {latest_version}") return latest_version def install_node(data_dir, bin_dir, channel, node_package_version='latest'): """ Download and install algod. """ node_package_dir = file_util.ensure_folder(f"/tmp/algod-pkg-{time_util.get_timestamp()}") data_dir = file_util.ensure_folder(data_dir) bin_dir = file_util.ensure_folder(bin_dir) os_type = os_util.get_os_type() cpu_arch_type = os_util.get_cpu_arch_type() if node_package_version == 'latest': if channel == 'test': node_package_version = get_latest_package_version('node', 'stable', os_type, cpu_arch_type) else: node_package_version = get_latest_package_version('node', channel, os_type, cpu_arch_type) print(f"Installing {channel} node package version {node_package_version} to:\n\tbin_dir: {bin_dir}\n\tdata_dir: {data_dir}") node_package_url = build_algo_release_url('node', channel, os_type, cpu_arch_type, node_package_version) if channel == 'test': node_package_url = build_algo_release_url('node', 'stable', os_type, cpu_arch_type, node_package_version) node_package_tar_path = f"{node_package_dir}/node_package.tar.gz" _ = urllib.request.urlretrieve(node_package_url, node_package_tar_path) file_util.decompressTarfile(node_package_tar_path, f"{node_package_dir}") file_util.mv_folder_contents(f"{node_package_dir}/data", data_dir) file_util.mv_folder_contents(f"{node_package_dir}/bin", bin_dir) if channel == 'stable': file_util.copy_file( os.path.join(node_package_dir, "genesis/mainnet/genesis.json"), os.path.join(data_dir, 'genesis.json') ) else: file_util.copy_file( os.path.join(node_package_dir, f"genesis/{channel}net/genesis.json"), os.path.join(data_dir, 'genesis.json') ) def show_node_configs(data_dir, kmd_dir): data_dir = file_util.ensure_folder(data_dir) kmd_dir = file_util.ensure_folder(kmd_dir) node_config_path = f"{data_dir}/config.json" kmd_config_path = f"{kmd_dir}/kmd_config.json" file_util.ensure_file(node_config_path, '{}') file_util.ensure_file(kmd_config_path, '{}') current_node_config = file_util.read_json_file(node_config_path) current_kmd_config = file_util.read_json_file(kmd_config_path) print(f"Showing node configs at {node_config_path} with:\n{json.dumps(current_node_config, sort_keys=True, indent=4)}") print(f"Showing node configs at {kmd_config_path} with:\n{json.dumps(current_kmd_config, sort_keys=True, indent=4)}") def configure_node(data_dir, kmd_dir, node_config, kmd_config): data_dir = file_util.ensure_folder(data_dir) kmd_dir = file_util.ensure_folder(kmd_dir) node_config_path = f"{data_dir}/config.json" 
kmd_config_path = f"{kmd_dir}/kmd_config.json" file_util.ensure_file(node_config_path, '{}') file_util.ensure_file(kmd_config_path, '{}') current_node_config = file_util.read_json_file(node_config_path) current_kmd_config = file_util.read_json_file(kmd_config_path) current_node_config.update(node_config) current_kmd_config.update(kmd_config) print(f"Updating node configs at {node_config_path} with:\n{json.dumps(node_config, sort_keys=True, indent=4)}") print(f"Updating node configs at {kmd_config_path} with:\n{json.dumps(kmd_config, sort_keys=True, indent=4)}") file_util.write_json_file(node_config_path, current_node_config) file_util.write_json_file(kmd_config_path, current_kmd_config) def start_node(data_dir, kmd_dir, bin_dir=None): goal_args = [ 'node', 'start', ] print(f"Starting node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}") goal(data_dir, kmd_dir, goal_args, bin_dir) def stop_node(data_dir, kmd_dir, bin_dir=None): goal_args = [ 'node', 'stop', ] print(f"Stopping node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}") goal(data_dir, kmd_dir, goal_args, bin_dir) def restart_node(data_dir, kmd_dir, bin_dir=None): goal_args = [ 'node', 'restart', ] print(f"Restarting node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}") goal(data_dir, kmd_dir, goal_args, bin_dir) def status_node(data_dir, kmd_dir, bin_dir=None): goal_args = [ 'node', 'status', ] print(f"Status of node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}") goal(data_dir, kmd_dir, goal_args, bin_dir) def goal(data_dir, kmd_dir, args, bin_dir=None): goal_command = ['goal'] if not bin_dir is None: goal_command = [f"{bin_dir}/goal"] goal_command.extend([ '-d', data_dir, '-k', kmd_dir, ]) goal_command.extend(args) subprocess.run(goal_command, check=True) def algorand_indexer(args, bin_dir=None, log_file_name=None): algorand_indexer_command = ['algorand-indexer'] if not bin_dir is None: algorand_indexer_command = [f"{bin_dir}/algorand-indexer"] if log_file_name is None: log_file_name = f"indexer-{time_util.get_timestamp()}.log" algorand_indexer_command.extend(args) log_file = open(log_file_name, 'w') subprocess.Popen(algorand_indexer_command, stdout=log_file, stderr=log_file) def start_indexer_local_node(node, postgres, bin_dir=None, pid_file=None, log_file_name=None): algorand_indexer_args = ['daemon'] algorand_indexer_args.extend([ '-d', node['data'], '--postgres', build_indexer_postgress_connection_string(postgres) ]) if not pid_file is None: algorand_indexer_args.extend([ '--pidfile', pid_file ]) algorand_indexer(algorand_indexer_args, bin_dir, log_file_name) def start_indexer_remote_node(node, postgres, bin_dir=None, pid_file=None, log_file_name=None): algorand_indexer_args = ['daemon'] algorand_indexer_args.extend([ '--algod-net', f"{node['host']}:{node['port']}", '--algod-token', node['token'], '--genesis', node['genesis'], '--postgres', build_indexer_postgress_connection_string(postgres) ]) if not pid_file is None: algorand_indexer_args.extend([ '--pidfile', pid_file ]) algorand_indexer(algorand_indexer_args, bin_dir, log_file_name) def build_indexer_postgress_connection_string(postgres): postgress_connection_string = [] for field in postgres.items(): postgress_connection_string.append(f"{field[0]}={field[1]}") return ' '.join(postgress_connection_string)
[((16, 14, 16, 35), 'mule.util.os_util.get_os_type', 'os_util.get_os_type', ({}, {}), '()', False, 'from mule.util import os_util\n'), ((17, 20, 17, 47), 'mule.util.os_util.get_cpu_arch_type', 'os_util.get_cpu_arch_type', ({}, {}), '()', False, 'from mule.util import os_util\n'), ((25, 21, 25, 70), 'mule.util.semver_util.get_highest_version', 'semver_util.get_highest_version', ({(25, 53, 25, 69): 'package_versions'}, {}), '(package_versions)', False, 'from mule.util import semver_util\n'), ((34, 15, 34, 48), 'mule.util.file_util.ensure_folder', 'file_util.ensure_folder', ({(34, 39, 34, 47): 'data_dir'}, {}), '(data_dir)', False, 'from mule.util import file_util\n'), ((35, 14, 35, 46), 'mule.util.file_util.ensure_folder', 'file_util.ensure_folder', ({(35, 38, 35, 45): 'bin_dir'}, {}), '(bin_dir)', False, 'from mule.util import file_util\n'), ((37, 14, 37, 35), 'mule.util.os_util.get_os_type', 'os_util.get_os_type', ({}, {}), '()', False, 'from mule.util import os_util\n'), ((38, 20, 38, 47), 'mule.util.os_util.get_cpu_arch_type', 'os_util.get_cpu_arch_type', ({}, {}), '()', False, 'from mule.util import os_util\n'), ((54, 4, 54, 77), 'mule.util.file_util.decompressTarfile', 'file_util.decompressTarfile', ({(54, 32, 54, 53): 'node_package_tar_path', (54, 55, 54, 76): 'f"""{node_package_dir}"""'}, {}), "(node_package_tar_path, f'{node_package_dir}')", False, 'from mule.util import file_util\n'), ((56, 4, 56, 70), 'mule.util.file_util.mv_folder_contents', 'file_util.mv_folder_contents', ({(56, 33, 56, 59): 'f"""{node_package_dir}/data"""', (56, 61, 56, 69): 'data_dir'}, {}), "(f'{node_package_dir}/data', data_dir)", False, 'from mule.util import file_util\n'), ((57, 4, 57, 68), 'mule.util.file_util.mv_folder_contents', 'file_util.mv_folder_contents', ({(57, 33, 57, 58): 'f"""{node_package_dir}/bin"""', (57, 60, 57, 67): 'bin_dir'}, {}), "(f'{node_package_dir}/bin', bin_dir)", False, 'from mule.util import file_util\n'), ((70, 15, 70, 48), 'mule.util.file_util.ensure_folder', 'file_util.ensure_folder', ({(70, 39, 70, 47): 'data_dir'}, {}), '(data_dir)', False, 'from mule.util import file_util\n'), ((71, 14, 71, 46), 'mule.util.file_util.ensure_folder', 'file_util.ensure_folder', ({(71, 38, 71, 45): 'kmd_dir'}, {}), '(kmd_dir)', False, 'from mule.util import file_util\n'), ((75, 4, 75, 49), 'mule.util.file_util.ensure_file', 'file_util.ensure_file', ({(75, 26, 75, 42): 'node_config_path', (75, 44, 75, 48): '"""{}"""'}, {}), "(node_config_path, '{}')", False, 'from mule.util import file_util\n'), ((76, 4, 76, 48), 'mule.util.file_util.ensure_file', 'file_util.ensure_file', ({(76, 26, 76, 41): 'kmd_config_path', (76, 43, 76, 47): '"""{}"""'}, {}), "(kmd_config_path, '{}')", False, 'from mule.util import file_util\n'), ((78, 26, 78, 68), 'mule.util.file_util.read_json_file', 'file_util.read_json_file', ({(78, 51, 78, 67): 'node_config_path'}, {}), '(node_config_path)', False, 'from mule.util import file_util\n'), ((79, 25, 79, 66), 'mule.util.file_util.read_json_file', 'file_util.read_json_file', ({(79, 50, 79, 65): 'kmd_config_path'}, {}), '(kmd_config_path)', False, 'from mule.util import file_util\n'), ((86, 15, 86, 48), 'mule.util.file_util.ensure_folder', 'file_util.ensure_folder', ({(86, 39, 86, 47): 'data_dir'}, {}), '(data_dir)', False, 'from mule.util import file_util\n'), ((87, 14, 87, 46), 'mule.util.file_util.ensure_folder', 'file_util.ensure_folder', ({(87, 38, 87, 45): 'kmd_dir'}, {}), '(kmd_dir)', False, 'from mule.util import file_util\n'), ((91, 4, 91, 49), 
'mule.util.file_util.ensure_file', 'file_util.ensure_file', ({(91, 26, 91, 42): 'node_config_path', (91, 44, 91, 48): '"""{}"""'}, {}), "(node_config_path, '{}')", False, 'from mule.util import file_util\n'), ((92, 4, 92, 48), 'mule.util.file_util.ensure_file', 'file_util.ensure_file', ({(92, 26, 92, 41): 'kmd_config_path', (92, 43, 92, 47): '"""{}"""'}, {}), "(kmd_config_path, '{}')", False, 'from mule.util import file_util\n'), ((94, 26, 94, 68), 'mule.util.file_util.read_json_file', 'file_util.read_json_file', ({(94, 51, 94, 67): 'node_config_path'}, {}), '(node_config_path)', False, 'from mule.util import file_util\n'), ((95, 25, 95, 66), 'mule.util.file_util.read_json_file', 'file_util.read_json_file', ({(95, 50, 95, 65): 'kmd_config_path'}, {}), '(kmd_config_path)', False, 'from mule.util import file_util\n'), ((103, 4, 103, 68), 'mule.util.file_util.write_json_file', 'file_util.write_json_file', ({(103, 30, 103, 46): 'node_config_path', (103, 48, 103, 67): 'current_node_config'}, {}), '(node_config_path, current_node_config)', False, 'from mule.util import file_util\n'), ((104, 4, 104, 66), 'mule.util.file_util.write_json_file', 'file_util.write_json_file', ({(104, 30, 104, 45): 'kmd_config_path', (104, 47, 104, 65): 'current_kmd_config'}, {}), '(kmd_config_path, current_kmd_config)', False, 'from mule.util import file_util\n'), ((149, 4, 149, 44), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((160, 4, 160, 80), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((18, 24, 23, 5), 'mule.util.s3_util.get_matching_s3_keys', 's3_util.get_matching_s3_keys', (), '', False, 'from mule.util import s3_util\n'), ((60, 12, 60, 74), 'os.path.join', 'os.path.join', ({(60, 25, 60, 41): 'node_package_dir', (60, 43, 60, 73): '"""genesis/mainnet/genesis.json"""'}, {}), "(node_package_dir, 'genesis/mainnet/genesis.json')", False, 'import os\n'), ((61, 12, 61, 50), 'os.path.join', 'os.path.join', ({(61, 25, 61, 33): 'data_dir', (61, 35, 61, 49): '"""genesis.json"""'}, {}), "(data_dir, 'genesis.json')", False, 'import os\n'), ((65, 12, 65, 80), 'os.path.join', 'os.path.join', ({(65, 25, 65, 41): 'node_package_dir', (65, 43, 65, 79): 'f"""genesis/{channel}net/genesis.json"""'}, {}), "(node_package_dir, f'genesis/{channel}net/genesis.json')", False, 'import os\n'), ((66, 12, 66, 50), 'os.path.join', 'os.path.join', ({(66, 25, 66, 33): 'data_dir', (66, 35, 66, 49): '"""genesis.json"""'}, {}), "(data_dir, 'genesis.json')", False, 'import os\n'), ((33, 65, 33, 90), 'mule.util.time_util.get_timestamp', 'time_util.get_timestamp', ({}, {}), '()', False, 'from mule.util import time_util\n'), ((81, 63, 81, 120), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((82, 62, 82, 118), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((100, 64, 100, 113), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((101, 63, 101, 111), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((156, 35, 156, 60), 'mule.util.time_util.get_timestamp', 'time_util.get_timestamp', ({}, {}), '()', False, 'from mule.util import time_util\n')]
allbuttonspressed/pyjs
examples/showcase/src/demos_panels/scrollPanel.py
c726fdead530eb63ee4763ae15daaa58d84cd58f
""" The ``ui.ScrollPanel`` class implements a panel that scrolls its contents. If you want the scroll bars to be always visible, call ``setAlwaysShowScrollBars(True)``. You can also change the current scrolling position programmatically by calling ``setScrollPosition(vPos)`` and ``setScrollHorizontalPosition(hPos)`` to change the horizontal and vertical scrolling position, respectively. It is in the nature of a scrollpanel that if you give it a relative size, it will not work. This makes it tricky to use it where you want it to fill out a parent widget of unknown size. To avoid this problem you will have to wrap its content in a SimplePanel and then use css/oveflow to control its behaviour as shown in the second example: "container" represents the parent widget that could be any absolute or relative size and the superscrollpanel will fill it out and apply vertical scrollbars if needed. """ from pyjamas.ui.SimplePanel import SimplePanel from pyjamas.ui.ScrollPanel import ScrollPanel from pyjamas.ui.HTML import HTML from pyjamas.ui.VerticalPanel import VerticalPanel class ScrollPanelDemo(SimplePanel): def __init__(self): SimplePanel.__init__(self) vert = VerticalPanel() vert.setSpacing("10px") self.add(vert) panel = ScrollPanel(Size=("300px", "100px")) contents = HTML("<b>Tao Te Ching, Chapter One</b><p>" + "The Way that can be told of is not an unvarying " + "way;<p>The names that can be named are not " + "unvarying names.<p>It was from the Nameless that " + "Heaven and Earth sprang;<p>The named is but the " + "mother that rears the ten thousand creatures, " + "each after its kind.") panel.add(contents) vert.add(panel) container = SimplePanel(Width="400px", Height="200px") contents2 = HTML(50*"Dont forget to grab the css for SuperScrollPanel in Showcase.css! ") panel2 = SuperScrollPanel(contents2) container.add(panel2) vert.add(container) class SuperScrollPanel(ScrollPanel): def __init__(self, panel): ScrollPanel.__init__(self) self.setHeight("100%") self.setStyleName("SuperScrollPanelOuter") self.inner = SimplePanel(Height="100%") self.add(self.inner) self.inner.setStyleName("SuperScrollPanelInner") self.inner.add(panel)
[((25, 8, 25, 34), 'pyjamas.ui.SimplePanel.SimplePanel.__init__', 'SimplePanel.__init__', ({(25, 29, 25, 33): 'self'}, {}), '(self)', False, 'from pyjamas.ui.SimplePanel import SimplePanel\n'), ((26, 15, 26, 30), 'pyjamas.ui.VerticalPanel.VerticalPanel', 'VerticalPanel', ({}, {}), '()', False, 'from pyjamas.ui.VerticalPanel import VerticalPanel\n'), ((30, 16, 30, 52), 'pyjamas.ui.ScrollPanel.ScrollPanel', 'ScrollPanel', (), '', False, 'from pyjamas.ui.ScrollPanel import ScrollPanel\n'), ((32, 19, 38, 47), 'pyjamas.ui.HTML.HTML', 'HTML', ({(32, 24, 38, 46): "'<b>Tao Te Ching, Chapter One</b><p>' + 'The Way that can be told of is not an unvarying ' + 'way;<p>The names that can be named are not ' + 'unvarying names.<p>It was from the Nameless that ' + 'Heaven and Earth sprang;<p>The named is but the ' + 'mother that rears the ten thousand creatures, ' + 'each after its kind.'"}, {}), "('<b>Tao Te Ching, Chapter One</b><p>' +\n 'The Way that can be told of is not an unvarying ' +\n 'way;<p>The names that can be named are not ' +\n 'unvarying names.<p>It was from the Nameless that ' +\n 'Heaven and Earth sprang;<p>The named is but the ' +\n 'mother that rears the ten thousand creatures, ' + 'each after its kind.')", False, 'from pyjamas.ui.HTML import HTML\n'), ((42, 20, 42, 62), 'pyjamas.ui.SimplePanel.SimplePanel', 'SimplePanel', (), '', False, 'from pyjamas.ui.SimplePanel import SimplePanel\n'), ((43, 20, 43, 97), 'pyjamas.ui.HTML.HTML', 'HTML', ({(43, 25, 43, 96): "50 * 'Dont forget to grab the css for SuperScrollPanel in Showcase.css! '"}, {}), "(50 * 'Dont forget to grab the css for SuperScrollPanel in Showcase.css! ')", False, 'from pyjamas.ui.HTML import HTML\n'), ((50, 8, 50, 34), 'pyjamas.ui.ScrollPanel.ScrollPanel.__init__', 'ScrollPanel.__init__', ({(50, 29, 50, 33): 'self'}, {}), '(self)', False, 'from pyjamas.ui.ScrollPanel import ScrollPanel\n'), ((54, 21, 54, 47), 'pyjamas.ui.SimplePanel.SimplePanel', 'SimplePanel', (), '', False, 'from pyjamas.ui.SimplePanel import SimplePanel\n')]
xiongchiamiov/phone-suitable-domain-name
psdn.py
da8d28c5783415f406e19b8ef2cde4c790a4c95d
#!/usr/bin/env python3 # May you recognize your weaknesses and share your strengths. # May you share freely, never taking more than you give. # May you find love and love everyone you find. import re import time import whois phone_spellable = re.compile(r'^[filoqrsuwxy]+$') candidate_words = [] with open('/usr/share/dict/words') as f: for word in f: word = word.strip() if phone_spellable.match(word): candidate_words.append((len(word), word)) candidate_words.sort() for word in candidate_words: query = False while query is False: try: query = whois.query('%s.com' % word[1]) except: print("Sleeping five seconds...") time.sleep(5) if not query: print(word)
[((12, 18, 12, 49), 're.compile', 're.compile', ({(12, 29, 12, 48): '"""^[filoqrsuwxy]+$"""'}, {}), "('^[filoqrsuwxy]+$')", False, 'import re\n'), ((27, 20, 27, 51), 'whois.query', 'whois.query', ({(27, 32, 27, 50): "'%s.com' % word[1]"}, {}), "('%s.com' % word[1])", False, 'import whois\n'), ((30, 12, 30, 25), 'time.sleep', 'time.sleep', ({(30, 23, 30, 24): '(5)'}, {}), '(5)', False, 'import time\n')]
akzare/Elevator_Sys_Design
src/requester/py/ElevatorTestCaseList.py
2f7d7381d68699515a43ec4cf7a8a8afade726f3
''' * @file ElevatorTestCaseList.py * @author Armin Zare Zadeh * @date 30 July 2020 * @version 0.1 * @brief Implements a class to hold all the test cases during the program life cycle. ''' #!/usr/bin/env python3 import sys import ctypes import ElevatorConfig as cfg import ElevatorMsgProtocol as msgProto class ElevatorTestCaseList: ''' This class builds a test case list out of the configuration and holds it during the runtime ''' def __init__(self, config): self.config = config self.CallGoTCList = [] def create_testcase_list(self): ''' Creates a test case list out of the configuration ''' # ############################################################ # Construct 'call' test cases for k in self.config.test_case['call'].keys(): msgHdr = msgProto.MsgHeader(tx_node_addr = self.config.test_case['call'][k][0], rx_node_addr = self.config.test_case['call'][k][1], msg_id = self.config.test_case['call'][k][2], msg_class = self.config.test_case['call'][k][3], hdr_len = self.config.network['packet_header_len'], payload_len = self.config.network['packet_payload_req_len']) self.CallGoTCList.append(msgProto.EncodeReqPacket(msg_header = msgHdr, time_tag = self.config.test_case['call'][k][4], req_typ = self.config.usr_request['call'], floor_num = self.config.test_case['call'][k][5], direction = self.config.test_case['call'][k][6], go_msg_id = self.config.test_case['call'][k][7], state = msgProto.CallGoState.READY2GO)) # ############################################################ # Construct 'go' test cases for k in self.config.test_case['go'].keys(): msgHdr = msgProto.MsgHeader(tx_node_addr = self.config.test_case['go'][k][0], rx_node_addr = self.config.test_case['go'][k][1], msg_id = self.config.test_case['go'][k][2], msg_class = self.config.test_case['go'][k][3], hdr_len = self.config.network['packet_header_len'], payload_len = self.config.network['packet_payload_req_len']) self.CallGoTCList.append(msgProto.EncodeReqPacket(msg_header = msgHdr, time_tag = self.config.test_case['go'][k][4], req_typ = self.config.usr_request['go'], floor_num = self.config.test_case['go'][k][5], direction = 0, go_msg_id = 0, state = msgProto.CallGoState.RESET))
[((35, 15, 40, 94), 'ElevatorMsgProtocol.MsgHeader', 'msgProto.MsgHeader', (), '', True, 'import ElevatorMsgProtocol as msgProto\n'), ((53, 15, 58, 94), 'ElevatorMsgProtocol.MsgHeader', 'msgProto.MsgHeader', (), '', True, 'import ElevatorMsgProtocol as msgProto\n'), ((41, 31, 47, 94), 'ElevatorMsgProtocol.EncodeReqPacket', 'msgProto.EncodeReqPacket', (), '', True, 'import ElevatorMsgProtocol as msgProto\n'), ((59, 31, 65, 91), 'ElevatorMsgProtocol.EncodeReqPacket', 'msgProto.EncodeReqPacket', (), '', True, 'import ElevatorMsgProtocol as msgProto\n')]
pmaigutyak/mp-cart
cart/views.py
53adbbdeea7f8f8b2d432b103f7347d89adf3e30
from django.utils.translation import ugettext from django.views.decorators.http import require_POST from django.http import JsonResponse from django.shortcuts import render from django.core.exceptions import ValidationError from django.views.decorators.csrf import csrf_exempt from cart.lib import get_cart from cart.forms import SelectProductForm, SetQtyForm @require_POST def _cart_action_view(request, action_factory, form_class, message): form = form_class(data=request.POST) if not form.is_valid(): return JsonResponse({'message': form.errors.as_json()}, status=403) cart = get_cart(request) try: result = action_factory(cart, form.cleaned_data) except ValidationError as e: return JsonResponse({'message': ', '.join(e.messages)}, status=403) return JsonResponse({ 'message': message, 'result': result, 'total': cart.printable_total }) def add(request): return _cart_action_view( request, action_factory=lambda cart, data: cart.add(**data), form_class=SelectProductForm, message=ugettext('Product added to cart') ) def remove(request): return _cart_action_view( request, action_factory=lambda cart, data: cart.remove(**data), form_class=SelectProductForm, message=ugettext('Product removed from cart') ) def get_modal(request): cart = get_cart(request) return render(request, 'cart/modal.html', {'cart': cart}) @csrf_exempt def set_qty(request): return _cart_action_view( request, action_factory=lambda cart, data: cart.set_qty(**data), form_class=SetQtyForm, message=ugettext('Quantity updated') )
[((21, 11, 21, 28), 'cart.lib.get_cart', 'get_cart', ({(21, 20, 21, 27): 'request'}, {}), '(request)', False, 'from cart.lib import get_cart\n'), ((28, 11, 32, 6), 'django.http.JsonResponse', 'JsonResponse', ({(28, 24, 32, 5): "{'message': message, 'result': result, 'total': cart.printable_total}"}, {}), "({'message': message, 'result': result, 'total': cart.\n printable_total})", False, 'from django.http import JsonResponse\n'), ((54, 11, 54, 28), 'cart.lib.get_cart', 'get_cart', ({(54, 20, 54, 27): 'request'}, {}), '(request)', False, 'from cart.lib import get_cart\n'), ((55, 11, 55, 61), 'django.shortcuts.render', 'render', ({(55, 18, 55, 25): 'request', (55, 27, 55, 44): '"""cart/modal.html"""', (55, 46, 55, 60): "{'cart': cart}"}, {}), "(request, 'cart/modal.html', {'cart': cart})", False, 'from django.shortcuts import render\n'), ((40, 16, 40, 49), 'django.utils.translation.ugettext', 'ugettext', ({(40, 25, 40, 48): '"""Product added to cart"""'}, {}), "('Product added to cart')", False, 'from django.utils.translation import ugettext\n'), ((49, 16, 49, 53), 'django.utils.translation.ugettext', 'ugettext', ({(49, 25, 49, 52): '"""Product removed from cart"""'}, {}), "('Product removed from cart')", False, 'from django.utils.translation import ugettext\n'), ((64, 16, 64, 44), 'django.utils.translation.ugettext', 'ugettext', ({(64, 25, 64, 43): '"""Quantity updated"""'}, {}), "('Quantity updated')", False, 'from django.utils.translation import ugettext\n')]
darius-luca-tech/AI_Projects
ChessAI/src/const.py
3cff26878807121e077375e5dbef39390fea0189
#------ game constants -----# #players WHITE = 0 BLACK = 1 BOTH = 2 #color for onTurnLabel PLAYER_COLOR = ["white", "black"] #figures PAWN = 1 KNIGHT = 2 BISHOP = 3 ROOK = 4 QUEEN = 5 KING = 6 FIGURE_NAME = [ "", "pawn", "knight", "bishop", "rook", "queen", "king" ] #used in move 32bit for promotion figure prom_figure = figure-2 PROM_KNIGHT = 0 PROM_BISHOP = 1 PROM_ROOK = 2 PROM_QUEEN = 3 #all lines A, B, C, D, E, F, G, H = range(8) #all squares A1, B1, C1, D1, E1, F1, G1, H1, \ A2, B2, C2, D2, E2, F2, G2, H2, \ A3, B3, C3, D3, E3, F3, G3, H3, \ A4, B4, C4, D4, E4, F4, G4, H4, \ A5, B5, C5, D5, E5, F5, G5, H5, \ A6, B6, C6, D6, E6, F6, G6, H6, \ A7, B7, C7, D7, E7, F7, G7, H7, \ A8, B8, C8, D8, E8, F8, G8, H8 = range(64) #----- game display constants -----# DEFAULTBORDERWIDTH = 20 DEFAULTTILEWIDTH = 45 DEFAULTFONTSIZE = (7, 15) COLORS = { "bg":"#EDC08C", "border":"#B55602", "tiles":("#FC9235", "#FFB87A") } #----- move types -----# NORMAL_MOVE, CAPTURE, PROMOTION, DOUBLE_STEP, ENPASSANT_CAPTURE, CASTLING, KING_CAPTURE = range(7) #----- move 32bit reservation -----# # a single move is stored in 32 bit as follows # xxxxxxxx xx x xxx xxx xxxxxx xxxxxx xxx # G F E D C B A # # A: move type (0-6) # B: start sq (0-63) # C: destination sq (0-63) # D: start figure (1-6) # E: captured figure (1-6) # F: color of moved piece (0-1) # G: promotion figure (0-3) #NAME = (start_bit, lenght) MOVE_TYPE = (0, 3) MOVE_START = (3, 6) MOVE_DEST = (9, 6) MOVE_FIG_START = (15, 3) MOVE_FIG_CAPTURE = (18, 3) MOVE_COLOR = (21, 1) MOVE_PROM = (22, 2) #----- castling -----# CASTLING_LEFT = 0 CASTLING_RIGHT = 1 #----- player status -----# IDELING = 0 PICKING = 1 INF = 1000000 ASCII_FIG = [[],[]] ASCII_FIG[WHITE] = [ 'x', chr(9817), chr(9816), chr(9815), chr(9814), chr(9813), chr(9812)] ASCII_FIG[BLACK] = [ 'x', chr(9823), chr(9822), chr(9821), chr(9820), chr(9819), chr(9818)] #AI constants CASTLING_RIGHT_LOSS_PENALTY = -40
[]
kapzlok2408/Pokemon-Showdown-Node-Bot
agent.py
c759eb9106fd2a3da3ebe4692a6730c37b2e5ee3
import gym import gym_pokemon import random if __name__ == "__main__": env = gym.make("Pokemon-v0") total_reward = 0.0 total_steps = 0 obs = env.reset() while True: action = random.randint(-1,8) obs, reward, done, _ = env.step(action) total_reward += reward total_steps += 1 print("Currently %d steps, total reward of %.2f" % (total_steps, total_reward)) if done: break
[((6, 7, 6, 29), 'gym.make', 'gym.make', ({(6, 16, 6, 28): '"""Pokemon-v0"""'}, {}), "('Pokemon-v0')", False, 'import gym\n'), ((12, 11, 12, 31), 'random.randint', 'random.randint', ({(12, 26, 12, 28): '-1', (12, 29, 12, 30): '8'}, {}), '(-1, 8)', False, 'import random\n')]
victor-da-costa/Aprendendo-Python
Curso-Em-Video-Python/Mundo-2/EXs/EX038.py
8fd19b93a13953cda30de02de7dac22b4e62fb5b
num1 = int(input('Digite o 1º número: ')) num2 = int(input('Digite o 2º número: ')) if num1 > num2:     print('O {} é maior que {}'.format(num1, num2)) elif num1 < num2:     print('O {} é maior que {}'.format(num2, num1)) else:     print('Os números são iguais')
[]
danjjl/ipyfilechooser
setup.py
19d2e906207b2c3426675eda7889267f5956b182
#!/usr/bin/env python import os from setuptools import setup, find_packages def read(fname): """Open files relative to package.""" return open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name='ipyfilechooser', version='0.3.1', author='Thomas Bouve (@crahan)', author_email='[email protected]', description=( 'Python file chooser widget for use in ' 'Jupyter/IPython in conjunction with ipywidgets' ), long_description=read('README.md'), long_description_content_type='text/markdown', url='https://github.com/crahan/ipyfilechooser', license='MIT', packages=find_packages(), classifiers=[ 'Programming Language :: Python :: 3', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', ], install_requires=[ 'ipywidgets' ] )
[((24, 13, 24, 28), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, find_packages\n'), ((8, 29, 8, 54), 'os.path.dirname', 'os.path.dirname', ({(8, 45, 8, 53): '__file__'}, {}), '(__file__)', False, 'import os\n')]
mithro/chromium-infra
appengine/chromium_build_logs/handler.py
d27ac0b230bedae4bc968515b02927cf9e17c2b7
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import appengine_config

import datetime
import json
import logging
import os.path
import pickle
import sys
import urllib

sys.path.append(
    os.path.join(os.path.abspath(os.path.dirname(__file__)), 'third_party'))

from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app

import cloudstorage

import app
import gtest_parser

# pylint: disable=pointless-string-statement
"""When displaying a list of results, how many to display on one page."""
PAGE_SIZE = 100


def _clean_int(value, default):
  """Convert a value to an int, or the default value if conversion fails."""
  try:
    return int(value)
  except (TypeError, ValueError), _:
    return default


class MyRequestHandler(webapp.RequestHandler):
  """Base request handler with this application specific helpers."""

  def _render_template(self, name, values):
    """
    Wrapper for template.render that updates response
    and knows where to look for templates.
    """
    self.response.out.write(template.render(
        os.path.join(os.path.dirname(__file__), 'templates', name), values))


class StatusReceiverAction(MyRequestHandler):

  def post(self):
    # This handler should be extremely fast so that buildbot doesn't fail
    # the push and doesn't get stuck on us. Defer all processing to the
    # background.
    try:
      deferred.defer(app.process_status_push, self.request.body,
                     _queue='fast')
    except Exception:
      # For large requests we have to do it now. We can't return HTTP 500
      # because buildbot will try again.
      app.process_status_push(self.request.body)


class FetchBuildersAction(MyRequestHandler):

  def get(self):
    deferred.defer(app.fetch_builders)


class FetchStepsAction(MyRequestHandler):

  def get(self):
    deferred.defer(app.fetch_steps)


class UpdateParsedDataAction(MyRequestHandler):

  def get(self):
    query = app.BuildStep.all(keys_only=True)
    query.filter('is_fetched =', True)
    query.filter('is_too_large =', False)
    deferred.defer(app.for_all_entities, query, app.update_parsed_data, None)


class MainAction(MyRequestHandler):

  def get(self):
    self._render_template('main.html', {})


class GTestQueryAction(MyRequestHandler):

  def get(self):
    gtest_results = []
    cursor = None
    if self.request.get('gtest_query'):
      query = app.GTestResult.all()
      query.filter('fullname =', self.request.get('gtest_query'))
      query.order('-time_finished')
      if self.request.get('cursor'):
        query.with_cursor(start_cursor=self.request.get('cursor'))
      gtest_results = query.fetch(PAGE_SIZE)
      cursor = query.cursor()
    self._render_template('query.html', {
        'gtest_query': self.request.get('gtest_query'),
        'cursor': cursor,
        'gtest_results': gtest_results,
    })


class SuppressionQueryAction(MyRequestHandler):

  def get(self):
    query = app.MemorySuppressionResult.all()
    query.filter('name =', self.request.get('suppression_query'))
    query.order('-time_finished')
    if self.request.get('cursor'):
      query.with_cursor(start_cursor=self.request.get('cursor'))
    suppression_results = query.fetch(PAGE_SIZE)
    self._render_template('suppression_query.html', {
        'suppression_query': self.request.get('suppression_query'),
        'cursor': query.cursor(),
        'suppression_results': suppression_results,
    })


class UnusedSuppressionsAction(MyRequestHandler):

  def post(self):
    now_timestamp = datetime.datetime.now()
    queries = []
    for line in self.request.body.splitlines():
      query = app.MemorySuppressionResult.all()
      query.filter('name =', line)
      query.order('-time_finished')
      queries.append(query.run(limit=1))
    for q in queries:
      for sr in q:
        if now_timestamp - sr.time_finished > datetime.timedelta(days=30):
          self.response.out.write(sr.name + '\n')


class ListAction(MyRequestHandler):
  """Lists stored build results."""

  def get(self):
    all_steps = app.BuildStep.all().order('-time_finished')
    if self.request.get('buildbot_root'):
      all_steps.filter('buildbot_root =',
                       urllib.unquote(self.request.get('buildbot_root')))
    if self.request.get('builder'):
      all_steps.filter('builder =',
                       urllib.unquote(self.request.get('builder')))
    if self.request.get('step_name'):
      all_steps.filter('step_name =',
                       urllib.unquote(self.request.get('step_name')))
    if self.request.get('status'):
      all_steps.filter('status =', _clean_int(urllib.unquote(
          self.request.get('status')), None))
    if self.request.get('cursor'):
      all_steps.with_cursor(start_cursor=self.request.get('cursor'))
    steps = all_steps.fetch(limit=PAGE_SIZE)
    step_names = app.iterate_large_result(app.StepName.all().order('name'))
    self._render_template('list.html', {
        'buildbot_roots': app.BUILDBOT_ROOTS,
        'step_names': step_names,
        'steps': steps,
        'cursor': all_steps.cursor(),
        'filter_buildbot_root': self.request.get('buildbot_root', ''),
        'filter_builder': self.request.get('builder', ''),
        'filter_step_name': self.request.get('step_name', ''),
        'filter_status': self.request.get('status', ''),
    })


class BuildStepJSONAction(MyRequestHandler):

  def get(self):
    all_steps = app.BuildStep.all().order('-time_finished')
    if self.request.get('cursor'):
      all_steps.with_cursor(start_cursor=self.request.get('cursor'))
    build_steps = all_steps.fetch(limit=1000)
    json_data = {
        'build_steps': [
            {
                'build_number': bs.build_number,
                'buildbot_root': bs.buildbot_root,
                'builder': bs.builder,
                'status': bs.status,
                'step_number': bs.step_number,
                'step_name': bs.step_name,
                # BigQuery doesn't recognize the T separator, but space works.
                'time_started': bs.time_started.isoformat(sep=' '),
                'time_finished': bs.time_finished.isoformat(sep=' '),
            } for bs in build_steps
        ],
        'cursor': all_steps.cursor(),
    }
    self.response.out.write(json.dumps(json_data))


class SuppressionSummaryAction(MyRequestHandler):
  """Displays summary information about memory suppressions."""

  def get(self):
    sort = 'count'
    if self.request.get('sort') in ('count',):
      sort = self.request.get('sort')
    query = app.MemorySuppressionSummary.all()
    monthly_timestamp = datetime.date.today().replace(day=1)
    query.filter('monthly_timestamp =', monthly_timestamp)
    query.order('monthly_timestamp')
    query.order('-%s' % sort)
    if self.request.get('cursor'):
      query.with_cursor(start_cursor=self.request.get('cursor'))
    suppression_summaries = query.fetch(PAGE_SIZE)
    self._render_template('suppression_summary.html', {
        'suppression_summary_query':
            self.request.get('suppression_summary_query'),
        'suppression_summaries': suppression_summaries,
        'cursor': query.cursor(),
        'sort': sort,
    })


class ViewRawLogAction(blobstore_handlers.BlobstoreDownloadHandler):
  """Sends selected log file to the user."""

  def get(self, blobkey):  # pylint: disable=arguments-differ
    blob_info = blobstore.BlobInfo.get(urllib.unquote(blobkey))
    if not blob_info:
      self.error(404)
      return
    self.send_blob(blob_info)


application = webapp.WSGIApplication(
    [('/', MainAction),
     ('/gtest_query', GTestQueryAction),
     ('/suppression_query', SuppressionQueryAction),
     ('/suppression_summary', SuppressionSummaryAction),
     ('/unused_suppressions', UnusedSuppressionsAction),
     ('/list', ListAction),
     ('/build_step_json', BuildStepJSONAction),
     ('/status_receiver', StatusReceiverAction),
     ('/tasks/fetch_builders', FetchBuildersAction),
     ('/tasks/fetch_steps', FetchStepsAction),
     ('/tasks/update_parsed_data', UpdateParsedDataAction),
     ('/viewlog/raw/(.*)', ViewRawLogAction)])


def main():
  my_default_retry_params = cloudstorage.RetryParams(
      initial_delay=0.5, max_delay=30.0, backoff_factor=2,
      urlfetch_timeout=60)
  cloudstorage.set_default_retry_params(my_default_retry_params)
  run_wsgi_app(application)


if __name__ == '__main__':
  main()
[]
tedye/leetcode
tools/leetcode.127.Word Ladder/leetcode.127.Word Ladder.submission1.py
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
class Solution:
    # @param {string} beginWord
    # @param {string} endWord
    # @param {set<string>} wordDict
    # @return {integer}
    def ladderLength(self, beginWord, endWord, wordDict):
        # BFS
        scanLayer = [beginWord]
        distRecord = [1]
        while scanLayer:
            curWord = scanLayer.pop(0)
            dist = distRecord.pop(0)
            if curWord == endWord:
                return dist
            for i in range(len(beginWord)):
                for j in 'abcdefghijklmnopqrstuvwxyz':
                    newWord = curWord[:i] + j + curWord[i+1:]
                    if newWord in wordDict:
                        scanLayer.append(newWord)
                        distRecord.append(dist+1)
                        wordDict.remove(newWord)
        return 0
[]
YevhenVieskov/ML-DL-in-production
aws_lambda/pytorch/source/caffe2/python/operator_test/elementwise_op_broadcast_test.py
03839abcb93a49d4f05c43aa4e446a040027cdb0
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest from hypothesis import given import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import core, workspace import caffe2.python.hypothesis_test_util as hu import caffe2.python.serialized_test.serialized_test_util as serial # TODO(jiayq): make them hypothesis tests for better coverage. class TestElementwiseBroadcast(serial.SerializedTestCase): @given(**hu.gcs) def test_broadcast_Add(self, gc, dc): # Set broadcast and no axis, i.e. broadcasting last dimensions. X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(4, 5).astype(np.float32) op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X + Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) # broadcasting intermediate dimensions X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(3, 4).astype(np.float32) op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1, axis=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X + Y[:, :, np.newaxis]) self.assertDeviceChecks(dc, op, [X, Y], [0]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) # broadcasting the first dimension X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(2).astype(np.float32) op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1, axis=0) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal( out, X + Y[:, np.newaxis, np.newaxis, np.newaxis]) self.assertDeviceChecks(dc, op, [X, Y], [0]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) # broadcasting with single elem dimensions at both ends X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(1, 4, 1).astype(np.float32) op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1, axis=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X + Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) @given(**hu.gcs) def test_broadcast_Mul(self, gc, dc): # Set broadcast and no axis, i.e. broadcasting last dimensions. 
X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(4, 5).astype(np.float32) op = core.CreateOperator("Mul", ["X", "Y"], "out", broadcast=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X * Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) # broadcasting intermediate dimensions X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(3, 4).astype(np.float32) op = core.CreateOperator("Mul", ["X", "Y"], "out", broadcast=1, axis=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X * Y[:, :, np.newaxis]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) self.assertDeviceChecks(dc, op, [X, Y], [0]) # broadcasting the first dimension X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(2).astype(np.float32) op = core.CreateOperator("Mul", ["X", "Y"], "out", broadcast=1, axis=0) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal( out, X * Y[:, np.newaxis, np.newaxis, np.newaxis]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) self.assertDeviceChecks(dc, op, [X, Y], [0]) # broadcasting with single elem dimensions at both ends X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(1, 4, 1).astype(np.float32) op = core.CreateOperator("Mul", ["X", "Y"], "out", broadcast=1, axis=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X * Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) @given(**hu.gcs) def test_broadcast_Sub(self, gc, dc): # Set broadcast and no axis, i.e. broadcasting last dimensions. 
X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(4, 5).astype(np.float32) op = core.CreateOperator("Sub", ["X", "Y"], "out", broadcast=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X - Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) # broadcasting intermediate dimensions X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(3, 4).astype(np.float32) op = core.CreateOperator("Sub", ["X", "Y"], "out", broadcast=1, axis=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X - Y[:, :, np.newaxis]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) self.assertDeviceChecks(dc, op, [X, Y], [0]) # broadcasting the first dimension X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(2).astype(np.float32) op = core.CreateOperator("Sub", ["X", "Y"], "out", broadcast=1, axis=0) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal( out, X - Y[:, np.newaxis, np.newaxis, np.newaxis]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) self.assertDeviceChecks(dc, op, [X, Y], [0]) # broadcasting with single elem dimensions at both ends X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(1, 4, 1).astype(np.float32) op = core.CreateOperator("Sub", ["X", "Y"], "out", broadcast=1, axis=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X - Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) self.assertGradientChecks(gc, op, [X, Y], 1, [0]) @serial.given(**hu.gcs) def test_broadcast_powt(self, gc, dc): np.random.seed(101) #operator def powt_op(X, Y): return [np.power(X, Y)] #two gradients Y*X^(Y-1) and X^Y * ln(X) def powt_grad(g_out, outputs, fwd_inputs): [X, Y] = fwd_inputs Z = outputs[0] return ([Y * np.power(X, Y - 1), Z * np.log(X)] * g_out) #1. Set broadcast and no axis, i.e. broadcasting last dimensions. X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0 Y = np.random.rand(4, 5).astype(np.float32) + 2.0 #two gradients Y*X^(Y-1) and X^Y * ln(X) #latter gradient is sumed over 1 and 0 dims to account for broadcast def powt_grad_broadcast(g_out, outputs, fwd_inputs): [GX, GY] = powt_grad(g_out, outputs, fwd_inputs) return ([GX, np.sum(np.sum(GY, 1), 0)]) op = core.CreateOperator("Pow", ["X", "Y"], "Z", broadcast=1) self.assertReferenceChecks(device_option=gc, op=op, inputs=[X, Y], reference=powt_op, output_to_grad="Z", grad_reference=powt_grad_broadcast) #2. 
broadcasting intermediate dimensions X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0 Y = np.random.rand(3, 4).astype(np.float32) + 2.0 #pow op with the latter array increased by one dim def powt_op_axis1(X, Y): return powt_op(X, Y[:, :, np.newaxis]) #two gradients Y*X^(Y-1) and X^Y * ln(X) #latter gradient is sumed over 3 and 0 dims to account for broadcast def powt_grad_axis1(g_out, outputs, fwd_inputs): [X, Y] = fwd_inputs [GX, GY] = powt_grad(g_out, outputs, [X, Y[:, :, np.newaxis]]) return ([GX, np.sum(np.sum(GY, 3), 0)]) op = core.CreateOperator("Pow", ["X", "Y"], "Z", broadcast=1, axis=1) self.assertReferenceChecks(device_option=gc, op=op, inputs=[X, Y], reference=powt_op_axis1, output_to_grad="Z", grad_reference=powt_grad_axis1) #3. broadcasting the first dimension X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0 Y = np.random.rand(2).astype(np.float32) + 2.0 #pow op with the latter array increased by one dim def powt_op_axis0(X, Y): return powt_op(X, Y[:, np.newaxis, np.newaxis, np.newaxis]) #two gradients Y*X^(Y-1) and X^Y * ln(X) #latter gradient is sumed over 3, 2 and 1 dims to account for broadcast def powt_grad_axis0(g_out, outputs, fwd_inputs): [X, Y] = fwd_inputs [GX, GY] = powt_grad(g_out, outputs, [X, Y[:, np.newaxis, np.newaxis, np.newaxis]]) return ([GX, np.sum(np.sum(np.sum(GY, 3), 2), 1)]) op = core.CreateOperator("Pow", ["X", "Y"], "Z", broadcast=1, axis=0) self.assertReferenceChecks(device_option=gc, op=op, inputs=[X, Y], reference=powt_op_axis0, output_to_grad="Z", grad_reference=powt_grad_axis0) #4. broadcasting with single elem dimensions at both ends X = np.random.rand(2, 3, 4, 5).astype(np.float32) + 1.0 Y = np.random.rand(1, 4, 1).astype(np.float32) + 2.0 #pow op with the latter array increased by one dim def powt_op_mixed(X, Y): return powt_op(X, Y[np.newaxis, :, :, :]) #two gradients Y*X^(Y-1) and X^Y * ln(X) #latter gradient is sumed over 0 and 1 dims to account for broadcast def powt_grad_mixed(g_out, outputs, fwd_inputs): [X, Y] = fwd_inputs [GX, GY] = powt_grad(g_out, outputs, [X, Y[np.newaxis, :, :, :]]) return ([GX, np.reshape(np.sum(np.sum(np.sum(GY, 3), 1), 0), (1, 4, 1))]) op = core.CreateOperator("Pow", ["X", "Y"], "Z", broadcast=1, axis=1) self.assertReferenceChecks(device_option=gc, op=op, inputs=[X, Y], reference=powt_op_mixed, output_to_grad="Z", grad_reference=powt_grad_mixed) @given(**hu.gcs) def test_broadcast_scalar(self, gc, dc): # broadcasting constant X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(1).astype(np.float32) op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal( out, X + Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) # broadcasting scalar X = np.random.rand(1).astype(np.float32) Y = np.random.rand(1).astype(np.float32).reshape([]) op = core.CreateOperator("Add", ["X", "Y"], "out", broadcast=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal( out, X + Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) @given(**hu.gcs) def test_semantic_broadcast(self, gc, dc): # NCHW as default X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(3).astype(np.float32) op = core.CreateOperator( "Add", ["X", "Y"], "out", broadcast=1, axis_str="C") workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) 
out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal( out, X + Y[:, np.newaxis, np.newaxis]) self.assertDeviceChecks(dc, op, [X, Y], [0]) # NHWC X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(5).astype(np.float32) op = core.CreateOperator( "Add", ["X", "Y"], "out", broadcast=1, axis_str="C", order="NHWC") workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") np.testing.assert_array_almost_equal(out, X + Y) self.assertDeviceChecks(dc, op, [X, Y], [0]) @given(**hu.gcs) def test_sum_reduce_empty_blob(self, gc, dc): net = core.Net('test') with core.DeviceScope(gc): net.GivenTensorFill([], ["X"], values=[], shape=[2, 0, 5]) net.GivenTensorFill([], ["Y"], values=[], shape=[2, 0]) net.SumReduceLike(["X", "Y"], "out", axis=0) workspace.RunNetOnce(net) @given(**hu.gcs) def test_sum_reduce(self, gc, dc): # Set broadcast and no axis, i.e. broadcasting last dimensions. X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(4, 5).astype(np.float32) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") res = np.sum(X, axis=0) res = np.sum(res, axis=0) np.testing.assert_array_almost_equal(out, res) self.assertDeviceChecks(dc, op, [X, Y], [0]) # Set broadcast and no axis, i.e. broadcasting last dimensions. X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(2, 3).astype(np.float32) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=0) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") res = np.sum(X, axis=3) res = np.sum(res, axis=2) np.testing.assert_array_almost_equal(out, res, decimal=3) self.assertDeviceChecks(dc, op, [X, Y], [0]) # broadcasting intermediate dimensions X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(3, 4).astype(np.float32) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") res = np.sum(X, axis=0) res = np.sum(res, axis=2) np.testing.assert_array_almost_equal(out, res) self.assertDeviceChecks(dc, op, [X, Y], [0]) # broadcasting intermediate dimensions X = np.random.rand(2, 3, 4, 500).astype(np.float64) Y = np.random.rand(1).astype(np.float64) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") res = np.array(np.sum(X)) np.testing.assert_array_almost_equal(out, res, decimal=0) # broadcasting with single elem dimensions at both ends X = np.random.rand(2, 3, 4, 5).astype(np.float32) Y = np.random.rand(1, 3, 4, 1).astype(np.float32) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1) workspace.FeedBlob("X", X) workspace.FeedBlob("Y", Y) workspace.RunOperatorOnce(op) out = workspace.FetchBlob("out") res = np.sum(X, axis=0) res = np.sum(res, axis=2).reshape(Y.shape) np.testing.assert_array_almost_equal(out, res) self.assertDeviceChecks(dc, op, [X, Y], [0]) # fp64 is not supported with the CUDA op dc_cpu_only = [d for d in dc if d.device_type != caffe2_pb2.CUDA] self.assertDeviceChecks(dc_cpu_only, op, [X, Y], [0]) @unittest.skipIf(not workspace.has_gpu_support, "No gpu support") @given(**hu.gcs_gpu_only) 
def test_sum_reduce_fp16(self, gc, dc): # Set broadcast and no axis, i.e. broadcasting last dimensions. X = np.random.rand(2, 3, 4, 5).astype(np.float16) Y = np.random.rand(4, 5).astype(np.float16) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1, device_option=gc) def ref_op(X, Y): res = np.sum(X, axis=0) res = np.sum(res, axis=0) return [res] self.assertReferenceChecks( device_option=gc, op=op, inputs=[X, Y], reference=ref_op, threshold=1e-3) # Set broadcast and no axis, i.e. broadcasting last dimensions. X = np.random.rand(2, 3, 4, 5).astype(np.float16) Y = np.random.rand(2, 3).astype(np.float16) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=0) def ref_op(X, Y): res = np.sum(X, axis=3) res = np.sum(res, axis=2) return [res] self.assertReferenceChecks( device_option=gc, op=op, inputs=[X, Y], reference=ref_op, threshold=1e-3) # broadcasting intermediate dimensions X = np.random.rand(2, 3, 4, 5).astype(np.float16) Y = np.random.rand(3, 4).astype(np.float16) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1, axis=1) def ref_op(X, Y): res = np.sum(X, axis=0) res = np.sum(res, axis=2) return [res] self.assertReferenceChecks( device_option=gc, op=op, inputs=[X, Y], reference=ref_op, threshold=1e-3) # broadcasting with single elem dimensions at both ends X = np.random.rand(2, 3, 4, 5).astype(np.float16) Y = np.random.rand(1, 3, 4, 1).astype(np.float16) op = core.CreateOperator( "SumReduceLike", ["X", "Y"], "out", broadcast=1) def ref_op(X, Y): res = np.sum(X, axis=0) res = np.sum(res, axis=2) return [res.reshape(Y.shape)] self.assertReferenceChecks( device_option=gc, op=op, inputs=[X, Y], reference=ref_op, threshold=1e-3) if __name__ == "__main__": unittest.main()
[((19, 5, 19, 20), 'hypothesis.given', 'given', ({}, {}), '(**hu.gcs)', False, 'from hypothesis import given\n'), ((70, 5, 70, 20), 'hypothesis.given', 'given', ({}, {}), '(**hu.gcs)', False, 'from hypothesis import given\n'), ((121, 5, 121, 20), 'hypothesis.given', 'given', ({}, {}), '(**hu.gcs)', False, 'from hypothesis import given\n'), ((172, 5, 172, 27), 'caffe2.python.serialized_test.serialized_test_util.given', 'serial.given', ({}, {}), '(**hu.gcs)', True, 'import caffe2.python.serialized_test.serialized_test_util as serial\n'), ((276, 5, 276, 20), 'hypothesis.given', 'given', ({}, {}), '(**hu.gcs)', False, 'from hypothesis import given\n'), ((302, 5, 302, 20), 'hypothesis.given', 'given', ({}, {}), '(**hu.gcs)', False, 'from hypothesis import given\n'), ((329, 5, 329, 20), 'hypothesis.given', 'given', ({}, {}), '(**hu.gcs)', False, 'from hypothesis import given\n'), ((339, 5, 339, 20), 'hypothesis.given', 'given', ({}, {}), '(**hu.gcs)', False, 'from hypothesis import given\n'), ((413, 5, 413, 69), 'unittest.skipIf', 'unittest.skipIf', ({(413, 21, 413, 50): '(not workspace.has_gpu_support)', (413, 52, 413, 68): '"""No gpu support"""'}, {}), "(not workspace.has_gpu_support, 'No gpu support')", False, 'import unittest\n'), ((414, 5, 414, 29), 'hypothesis.given', 'given', ({}, {}), '(**hu.gcs_gpu_only)', False, 'from hypothesis import given\n'), ((489, 4, 489, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((24, 13, 24, 71), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((25, 8, 25, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(25, 27, 25, 30): '"""X"""', (25, 32, 25, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((26, 8, 26, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(26, 27, 26, 30): '"""Y"""', (26, 32, 26, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((27, 8, 27, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(27, 34, 27, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((28, 14, 28, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(28, 34, 28, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((29, 8, 29, 56), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(29, 45, 29, 48): 'out', (29, 50, 29, 55): '(X + Y)'}, {}), '(out, X + Y)', True, 'import numpy as np\n'), ((36, 13, 36, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((37, 8, 37, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(37, 27, 37, 30): '"""X"""', (37, 32, 37, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((38, 8, 38, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(38, 27, 38, 30): '"""Y"""', (38, 32, 38, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((39, 8, 39, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(39, 34, 39, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((40, 14, 40, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(40, 34, 40, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((41, 8, 41, 74), 
'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(41, 45, 41, 48): 'out', (41, 50, 41, 73): '(X + Y[:, :, (np.newaxis)])'}, {}), '(out, X + Y[:, :, (np.newaxis)])', True, 'import numpy as np\n'), ((48, 13, 48, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((49, 8, 49, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(49, 27, 49, 30): '"""X"""', (49, 32, 49, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((50, 8, 50, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(50, 27, 50, 30): '"""Y"""', (50, 32, 50, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((51, 8, 51, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(51, 34, 51, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((52, 14, 52, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(52, 34, 52, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((53, 8, 54, 62), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(54, 12, 54, 15): 'out', (54, 17, 54, 61): '(X + Y[:, (np.newaxis), (np.newaxis), (np.newaxis)])'}, {}), '(out, X + Y[:, (np.newaxis), (np.\n newaxis), (np.newaxis)])', True, 'import numpy as np\n'), ((61, 13, 61, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((62, 8, 62, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(62, 27, 62, 30): '"""X"""', (62, 32, 62, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((63, 8, 63, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(63, 27, 63, 30): '"""Y"""', (63, 32, 63, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((64, 8, 64, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(64, 34, 64, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((65, 14, 65, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(65, 34, 65, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((66, 8, 66, 56), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(66, 45, 66, 48): 'out', (66, 50, 66, 55): '(X + Y)'}, {}), '(out, X + Y)', True, 'import numpy as np\n'), ((75, 13, 75, 71), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((76, 8, 76, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(76, 27, 76, 30): '"""X"""', (76, 32, 76, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((77, 8, 77, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(77, 27, 77, 30): '"""Y"""', (77, 32, 77, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((78, 8, 78, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(78, 34, 78, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((79, 14, 79, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(79, 34, 79, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((80, 8, 80, 56), 
'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(80, 45, 80, 48): 'out', (80, 50, 80, 55): '(X * Y)'}, {}), '(out, X * Y)', True, 'import numpy as np\n'), ((87, 13, 87, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((88, 8, 88, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(88, 27, 88, 30): '"""X"""', (88, 32, 88, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((89, 8, 89, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(89, 27, 89, 30): '"""Y"""', (89, 32, 89, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((90, 8, 90, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(90, 34, 90, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((91, 14, 91, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(91, 34, 91, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((92, 8, 92, 74), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(92, 45, 92, 48): 'out', (92, 50, 92, 73): '(X * Y[:, :, (np.newaxis)])'}, {}), '(out, X * Y[:, :, (np.newaxis)])', True, 'import numpy as np\n'), ((99, 13, 99, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((100, 8, 100, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(100, 27, 100, 30): '"""X"""', (100, 32, 100, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((101, 8, 101, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(101, 27, 101, 30): '"""Y"""', (101, 32, 101, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((102, 8, 102, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(102, 34, 102, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((103, 14, 103, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(103, 34, 103, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((104, 8, 105, 62), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(105, 12, 105, 15): 'out', (105, 17, 105, 61): '(X * Y[:, (np.newaxis), (np.newaxis), (np.newaxis)])'}, {}), '(out, X * Y[:, (np.newaxis), (np.\n newaxis), (np.newaxis)])', True, 'import numpy as np\n'), ((112, 13, 112, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((113, 8, 113, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(113, 27, 113, 30): '"""X"""', (113, 32, 113, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((114, 8, 114, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(114, 27, 114, 30): '"""Y"""', (114, 32, 114, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((115, 8, 115, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(115, 34, 115, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((116, 14, 116, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(116, 34, 116, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((117, 8, 
117, 56), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(117, 45, 117, 48): 'out', (117, 50, 117, 55): '(X * Y)'}, {}), '(out, X * Y)', True, 'import numpy as np\n'), ((126, 13, 126, 71), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((127, 8, 127, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(127, 27, 127, 30): '"""X"""', (127, 32, 127, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((128, 8, 128, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(128, 27, 128, 30): '"""Y"""', (128, 32, 128, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((129, 8, 129, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(129, 34, 129, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((130, 14, 130, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(130, 34, 130, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((131, 8, 131, 56), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(131, 45, 131, 48): 'out', (131, 50, 131, 55): '(X - Y)'}, {}), '(out, X - Y)', True, 'import numpy as np\n'), ((138, 13, 138, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((139, 8, 139, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(139, 27, 139, 30): '"""X"""', (139, 32, 139, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((140, 8, 140, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(140, 27, 140, 30): '"""Y"""', (140, 32, 140, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((141, 8, 141, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(141, 34, 141, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((142, 14, 142, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(142, 34, 142, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((143, 8, 143, 74), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(143, 45, 143, 48): 'out', (143, 50, 143, 73): '(X - Y[:, :, (np.newaxis)])'}, {}), '(out, X - Y[:, :, (np.newaxis)])', True, 'import numpy as np\n'), ((150, 13, 150, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((151, 8, 151, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(151, 27, 151, 30): '"""X"""', (151, 32, 151, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((152, 8, 152, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(152, 27, 152, 30): '"""Y"""', (152, 32, 152, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((153, 8, 153, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(153, 34, 153, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((154, 14, 154, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(154, 34, 154, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((155, 8, 156, 62), 
'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(156, 12, 156, 15): 'out', (156, 17, 156, 61): '(X - Y[:, (np.newaxis), (np.newaxis), (np.newaxis)])'}, {}), '(out, X - Y[:, (np.newaxis), (np.\n newaxis), (np.newaxis)])', True, 'import numpy as np\n'), ((163, 13, 163, 79), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((164, 8, 164, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(164, 27, 164, 30): '"""X"""', (164, 32, 164, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((165, 8, 165, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(165, 27, 165, 30): '"""Y"""', (165, 32, 165, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((166, 8, 166, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(166, 34, 166, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((167, 14, 167, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(167, 34, 167, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((168, 8, 168, 56), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(168, 45, 168, 48): 'out', (168, 50, 168, 55): '(X - Y)'}, {}), '(out, X - Y)', True, 'import numpy as np\n'), ((174, 8, 174, 27), 'numpy.random.seed', 'np.random.seed', ({(174, 23, 174, 26): '(101)'}, {}), '(101)', True, 'import numpy as np\n'), ((196, 13, 196, 69), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((219, 13, 219, 77), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((244, 13, 244, 77), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((268, 13, 268, 77), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((281, 13, 281, 71), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((282, 8, 282, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(282, 27, 282, 30): '"""X"""', (282, 32, 282, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((283, 8, 283, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(283, 27, 283, 30): '"""Y"""', (283, 32, 283, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((284, 8, 284, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(284, 34, 284, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((285, 14, 285, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(285, 34, 285, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((286, 8, 287, 23), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(287, 12, 287, 15): 'out', (287, 17, 287, 22): '(X + Y)'}, {}), '(out, X + Y)', True, 'import numpy as np\n'), ((293, 13, 293, 71), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((294, 8, 294, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(294, 27, 294, 30): 
'"""X"""', (294, 32, 294, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((295, 8, 295, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(295, 27, 295, 30): '"""Y"""', (295, 32, 295, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((296, 8, 296, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(296, 34, 296, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((297, 14, 297, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(297, 34, 297, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((298, 8, 299, 23), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(299, 12, 299, 15): 'out', (299, 17, 299, 22): '(X + Y)'}, {}), '(out, X + Y)', True, 'import numpy as np\n'), ((307, 13, 308, 64), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((309, 8, 309, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(309, 27, 309, 30): '"""X"""', (309, 32, 309, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((310, 8, 310, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(310, 27, 310, 30): '"""Y"""', (310, 32, 310, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((311, 8, 311, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(311, 34, 311, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((312, 14, 312, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(312, 34, 312, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((313, 8, 314, 50), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(314, 12, 314, 15): 'out', (314, 17, 314, 49): '(X + Y[:, (np.newaxis), (np.newaxis)])'}, {}), '(out, X + Y[:, (np.newaxis), (np.newaxis)])', True, 'import numpy as np\n'), ((320, 13, 321, 78), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((322, 8, 322, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(322, 27, 322, 30): '"""X"""', (322, 32, 322, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((323, 8, 323, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(323, 27, 323, 30): '"""Y"""', (323, 32, 323, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((324, 8, 324, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(324, 34, 324, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((325, 14, 325, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(325, 34, 325, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((326, 8, 326, 56), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(326, 45, 326, 48): 'out', (326, 50, 326, 55): '(X + Y)'}, {}), '(out, X + Y)', True, 'import numpy as np\n'), ((331, 14, 331, 30), 'caffe2.python.core.Net', 'core.Net', ({(331, 23, 331, 29): '"""test"""'}, {}), "('test')", False, 'from caffe2.python import core, workspace\n'), ((344, 13, 345, 60), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from 
caffe2.python import core, workspace\n'), ((346, 8, 346, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(346, 27, 346, 30): '"""X"""', (346, 32, 346, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((347, 8, 347, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(347, 27, 347, 30): '"""Y"""', (347, 32, 347, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((348, 8, 348, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(348, 34, 348, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((349, 14, 349, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(349, 34, 349, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((350, 14, 350, 31), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((351, 14, 351, 33), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((352, 8, 352, 54), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(352, 45, 352, 48): 'out', (352, 50, 352, 53): 'res'}, {}), '(out, res)', True, 'import numpy as np\n'), ((358, 13, 359, 68), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((360, 8, 360, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(360, 27, 360, 30): '"""X"""', (360, 32, 360, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((361, 8, 361, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(361, 27, 361, 30): '"""Y"""', (361, 32, 361, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((362, 8, 362, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(362, 34, 362, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((363, 14, 363, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(363, 34, 363, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((364, 14, 364, 31), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((365, 14, 365, 33), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((366, 8, 366, 65), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (), '', True, 'import numpy as np\n'), ((372, 13, 373, 68), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((374, 8, 374, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(374, 27, 374, 30): '"""X"""', (374, 32, 374, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((375, 8, 375, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(375, 27, 375, 30): '"""Y"""', (375, 32, 375, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((376, 8, 376, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(376, 34, 376, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((377, 14, 377, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(377, 34, 377, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((378, 14, 378, 31), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((379, 14, 379, 33), 'numpy.sum', 'np.sum', (), '', True, 'import numpy 
as np\n'), ((380, 8, 380, 54), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(380, 45, 380, 48): 'out', (380, 50, 380, 53): 'res'}, {}), '(out, res)', True, 'import numpy as np\n'), ((386, 13, 387, 60), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((388, 8, 388, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(388, 27, 388, 30): '"""X"""', (388, 32, 388, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((389, 8, 389, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(389, 27, 389, 30): '"""Y"""', (389, 32, 389, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((390, 8, 390, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(390, 34, 390, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((391, 14, 391, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(391, 34, 391, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((393, 8, 393, 65), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (), '', True, 'import numpy as np\n'), ((398, 13, 399, 60), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((400, 8, 400, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(400, 27, 400, 30): '"""X"""', (400, 32, 400, 33): 'X'}, {}), "('X', X)", False, 'from caffe2.python import core, workspace\n'), ((401, 8, 401, 34), 'caffe2.python.workspace.FeedBlob', 'workspace.FeedBlob', ({(401, 27, 401, 30): '"""Y"""', (401, 32, 401, 33): 'Y'}, {}), "('Y', Y)", False, 'from caffe2.python import core, workspace\n'), ((402, 8, 402, 37), 'caffe2.python.workspace.RunOperatorOnce', 'workspace.RunOperatorOnce', ({(402, 34, 402, 36): 'op'}, {}), '(op)', False, 'from caffe2.python import core, workspace\n'), ((403, 14, 403, 40), 'caffe2.python.workspace.FetchBlob', 'workspace.FetchBlob', ({(403, 34, 403, 39): '"""out"""'}, {}), "('out')", False, 'from caffe2.python import core, workspace\n'), ((404, 14, 404, 31), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((406, 8, 406, 54), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', ({(406, 45, 406, 48): 'out', (406, 50, 406, 53): 'res'}, {}), '(out, res)', True, 'import numpy as np\n'), ((419, 13, 420, 78), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((437, 13, 438, 68), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((455, 13, 456, 68), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((473, 13, 474, 60), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (), '', False, 'from caffe2.python import core, workspace\n'), ((333, 13, 333, 33), 'caffe2.python.core.DeviceScope', 'core.DeviceScope', ({(333, 30, 333, 32): 'gc'}, {}), '(gc)', False, 'from caffe2.python import core, workspace\n'), ((337, 12, 337, 37), 'caffe2.python.workspace.RunNetOnce', 'workspace.RunNetOnce', ({(337, 33, 337, 36): 'net'}, {}), '(net)', False, 'from caffe2.python import core, workspace\n'), ((392, 23, 392, 32), 'numpy.sum', 'np.sum', ({(392, 30, 392, 31): 'X'}, {}), '(X)', True, 
'import numpy as np\n'), ((423, 18, 423, 35), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((424, 18, 424, 37), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((441, 18, 441, 35), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((442, 18, 442, 37), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((459, 18, 459, 35), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((460, 18, 460, 37), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((477, 18, 477, 35), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((478, 18, 478, 37), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((22, 12, 22, 38), 'numpy.random.rand', 'np.random.rand', ({(22, 27, 22, 28): '2', (22, 30, 22, 31): '3', (22, 33, 22, 34): '4', (22, 36, 22, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((23, 12, 23, 32), 'numpy.random.rand', 'np.random.rand', ({(23, 27, 23, 28): '4', (23, 30, 23, 31): '5'}, {}), '(4, 5)', True, 'import numpy as np\n'), ((34, 12, 34, 38), 'numpy.random.rand', 'np.random.rand', ({(34, 27, 34, 28): '2', (34, 30, 34, 31): '3', (34, 33, 34, 34): '4', (34, 36, 34, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((35, 12, 35, 32), 'numpy.random.rand', 'np.random.rand', ({(35, 27, 35, 28): '3', (35, 30, 35, 31): '4'}, {}), '(3, 4)', True, 'import numpy as np\n'), ((46, 12, 46, 38), 'numpy.random.rand', 'np.random.rand', ({(46, 27, 46, 28): '2', (46, 30, 46, 31): '3', (46, 33, 46, 34): '4', (46, 36, 46, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((47, 12, 47, 29), 'numpy.random.rand', 'np.random.rand', ({(47, 27, 47, 28): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((59, 12, 59, 38), 'numpy.random.rand', 'np.random.rand', ({(59, 27, 59, 28): '2', (59, 30, 59, 31): '3', (59, 33, 59, 34): '4', (59, 36, 59, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((60, 12, 60, 35), 'numpy.random.rand', 'np.random.rand', ({(60, 27, 60, 28): '1', (60, 30, 60, 31): '4', (60, 33, 60, 34): '1'}, {}), '(1, 4, 1)', True, 'import numpy as np\n'), ((73, 12, 73, 38), 'numpy.random.rand', 'np.random.rand', ({(73, 27, 73, 28): '2', (73, 30, 73, 31): '3', (73, 33, 73, 34): '4', (73, 36, 73, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((74, 12, 74, 32), 'numpy.random.rand', 'np.random.rand', ({(74, 27, 74, 28): '4', (74, 30, 74, 31): '5'}, {}), '(4, 5)', True, 'import numpy as np\n'), ((85, 12, 85, 38), 'numpy.random.rand', 'np.random.rand', ({(85, 27, 85, 28): '2', (85, 30, 85, 31): '3', (85, 33, 85, 34): '4', (85, 36, 85, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((86, 12, 86, 32), 'numpy.random.rand', 'np.random.rand', ({(86, 27, 86, 28): '3', (86, 30, 86, 31): '4'}, {}), '(3, 4)', True, 'import numpy as np\n'), ((97, 12, 97, 38), 'numpy.random.rand', 'np.random.rand', ({(97, 27, 97, 28): '2', (97, 30, 97, 31): '3', (97, 33, 97, 34): '4', (97, 36, 97, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((98, 12, 98, 29), 'numpy.random.rand', 'np.random.rand', ({(98, 27, 98, 28): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((110, 12, 110, 38), 'numpy.random.rand', 'np.random.rand', ({(110, 27, 110, 28): '2', (110, 30, 110, 31): '3', (110, 33, 110, 34): '4', (110, 36, 110, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((111, 12, 111, 35), 'numpy.random.rand', 'np.random.rand', ({(111, 27, 111, 28): '1', (111, 30, 111, 31): '4', (111, 33, 111, 34): '1'}, {}), '(1, 4, 1)', True, 'import 
numpy as np\n'), ((124, 12, 124, 38), 'numpy.random.rand', 'np.random.rand', ({(124, 27, 124, 28): '2', (124, 30, 124, 31): '3', (124, 33, 124, 34): '4', (124, 36, 124, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((125, 12, 125, 32), 'numpy.random.rand', 'np.random.rand', ({(125, 27, 125, 28): '4', (125, 30, 125, 31): '5'}, {}), '(4, 5)', True, 'import numpy as np\n'), ((136, 12, 136, 38), 'numpy.random.rand', 'np.random.rand', ({(136, 27, 136, 28): '2', (136, 30, 136, 31): '3', (136, 33, 136, 34): '4', (136, 36, 136, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((137, 12, 137, 32), 'numpy.random.rand', 'np.random.rand', ({(137, 27, 137, 28): '3', (137, 30, 137, 31): '4'}, {}), '(3, 4)', True, 'import numpy as np\n'), ((148, 12, 148, 38), 'numpy.random.rand', 'np.random.rand', ({(148, 27, 148, 28): '2', (148, 30, 148, 31): '3', (148, 33, 148, 34): '4', (148, 36, 148, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((149, 12, 149, 29), 'numpy.random.rand', 'np.random.rand', ({(149, 27, 149, 28): '2'}, {}), '(2)', True, 'import numpy as np\n'), ((161, 12, 161, 38), 'numpy.random.rand', 'np.random.rand', ({(161, 27, 161, 28): '2', (161, 30, 161, 31): '3', (161, 33, 161, 34): '4', (161, 36, 161, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((162, 12, 162, 35), 'numpy.random.rand', 'np.random.rand', ({(162, 27, 162, 28): '1', (162, 30, 162, 31): '4', (162, 33, 162, 34): '1'}, {}), '(1, 4, 1)', True, 'import numpy as np\n'), ((178, 20, 178, 34), 'numpy.power', 'np.power', ({(178, 29, 178, 30): 'X', (178, 32, 178, 33): 'Y'}, {}), '(X, Y)', True, 'import numpy as np\n'), ((279, 12, 279, 38), 'numpy.random.rand', 'np.random.rand', ({(279, 27, 279, 28): '2', (279, 30, 279, 31): '3', (279, 33, 279, 34): '4', (279, 36, 279, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((280, 12, 280, 29), 'numpy.random.rand', 'np.random.rand', ({(280, 27, 280, 28): '1'}, {}), '(1)', True, 'import numpy as np\n'), ((291, 12, 291, 29), 'numpy.random.rand', 'np.random.rand', ({(291, 27, 291, 28): '1'}, {}), '(1)', True, 'import numpy as np\n'), ((305, 12, 305, 38), 'numpy.random.rand', 'np.random.rand', ({(305, 27, 305, 28): '2', (305, 30, 305, 31): '3', (305, 33, 305, 34): '4', (305, 36, 305, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((306, 12, 306, 29), 'numpy.random.rand', 'np.random.rand', ({(306, 27, 306, 28): '3'}, {}), '(3)', True, 'import numpy as np\n'), ((318, 12, 318, 38), 'numpy.random.rand', 'np.random.rand', ({(318, 27, 318, 28): '2', (318, 30, 318, 31): '3', (318, 33, 318, 34): '4', (318, 36, 318, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((319, 12, 319, 29), 'numpy.random.rand', 'np.random.rand', ({(319, 27, 319, 28): '5'}, {}), '(5)', True, 'import numpy as np\n'), ((342, 12, 342, 38), 'numpy.random.rand', 'np.random.rand', ({(342, 27, 342, 28): '2', (342, 30, 342, 31): '3', (342, 33, 342, 34): '4', (342, 36, 342, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((343, 12, 343, 32), 'numpy.random.rand', 'np.random.rand', ({(343, 27, 343, 28): '4', (343, 30, 343, 31): '5'}, {}), '(4, 5)', True, 'import numpy as np\n'), ((356, 12, 356, 38), 'numpy.random.rand', 'np.random.rand', ({(356, 27, 356, 28): '2', (356, 30, 356, 31): '3', (356, 33, 356, 34): '4', (356, 36, 356, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((357, 12, 357, 32), 'numpy.random.rand', 'np.random.rand', ({(357, 27, 357, 28): '2', (357, 30, 357, 31): '3'}, {}), '(2, 3)', True, 
'import numpy as np\n'), ((370, 12, 370, 38), 'numpy.random.rand', 'np.random.rand', ({(370, 27, 370, 28): '2', (370, 30, 370, 31): '3', (370, 33, 370, 34): '4', (370, 36, 370, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((371, 12, 371, 32), 'numpy.random.rand', 'np.random.rand', ({(371, 27, 371, 28): '3', (371, 30, 371, 31): '4'}, {}), '(3, 4)', True, 'import numpy as np\n'), ((384, 12, 384, 40), 'numpy.random.rand', 'np.random.rand', ({(384, 27, 384, 28): '2', (384, 30, 384, 31): '3', (384, 33, 384, 34): '4', (384, 36, 384, 39): '500'}, {}), '(2, 3, 4, 500)', True, 'import numpy as np\n'), ((385, 12, 385, 29), 'numpy.random.rand', 'np.random.rand', ({(385, 27, 385, 28): '1'}, {}), '(1)', True, 'import numpy as np\n'), ((396, 12, 396, 38), 'numpy.random.rand', 'np.random.rand', ({(396, 27, 396, 28): '2', (396, 30, 396, 31): '3', (396, 33, 396, 34): '4', (396, 36, 396, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((397, 12, 397, 38), 'numpy.random.rand', 'np.random.rand', ({(397, 27, 397, 28): '1', (397, 30, 397, 31): '3', (397, 33, 397, 34): '4', (397, 36, 397, 37): '1'}, {}), '(1, 3, 4, 1)', True, 'import numpy as np\n'), ((405, 14, 405, 33), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((417, 12, 417, 38), 'numpy.random.rand', 'np.random.rand', ({(417, 27, 417, 28): '2', (417, 30, 417, 31): '3', (417, 33, 417, 34): '4', (417, 36, 417, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((418, 12, 418, 32), 'numpy.random.rand', 'np.random.rand', ({(418, 27, 418, 28): '4', (418, 30, 418, 31): '5'}, {}), '(4, 5)', True, 'import numpy as np\n'), ((435, 12, 435, 38), 'numpy.random.rand', 'np.random.rand', ({(435, 27, 435, 28): '2', (435, 30, 435, 31): '3', (435, 33, 435, 34): '4', (435, 36, 435, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((436, 12, 436, 32), 'numpy.random.rand', 'np.random.rand', ({(436, 27, 436, 28): '2', (436, 30, 436, 31): '3'}, {}), '(2, 3)', True, 'import numpy as np\n'), ((453, 12, 453, 38), 'numpy.random.rand', 'np.random.rand', ({(453, 27, 453, 28): '2', (453, 30, 453, 31): '3', (453, 33, 453, 34): '4', (453, 36, 453, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((454, 12, 454, 32), 'numpy.random.rand', 'np.random.rand', ({(454, 27, 454, 28): '3', (454, 30, 454, 31): '4'}, {}), '(3, 4)', True, 'import numpy as np\n'), ((471, 12, 471, 38), 'numpy.random.rand', 'np.random.rand', ({(471, 27, 471, 28): '2', (471, 30, 471, 31): '3', (471, 33, 471, 34): '4', (471, 36, 471, 37): '5'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((472, 12, 472, 38), 'numpy.random.rand', 'np.random.rand', ({(472, 27, 472, 28): '1', (472, 30, 472, 31): '3', (472, 33, 472, 34): '4', (472, 36, 472, 37): '1'}, {}), '(1, 3, 4, 1)', True, 'import numpy as np\n'), ((187, 12, 187, 38), 'numpy.random.rand', 'np.random.rand', ({(187, 27, 187, 28): '(2)', (187, 30, 187, 31): '(3)', (187, 33, 187, 34): '(4)', (187, 36, 187, 37): '(5)'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((188, 12, 188, 32), 'numpy.random.rand', 'np.random.rand', ({(188, 27, 188, 28): '(4)', (188, 30, 188, 31): '(5)'}, {}), '(4, 5)', True, 'import numpy as np\n'), ((194, 32, 194, 45), 'numpy.sum', 'np.sum', ({(194, 39, 194, 41): 'GY', (194, 43, 194, 44): '(1)'}, {}), '(GY, 1)', True, 'import numpy as np\n'), ((205, 12, 205, 38), 'numpy.random.rand', 'np.random.rand', ({(205, 27, 205, 28): '(2)', (205, 30, 205, 31): '(3)', (205, 33, 205, 34): '(4)', (205, 36, 205, 37): '(5)'}, {}), '(2, 3, 4, 5)', True, 'import 
numpy as np\n'), ((206, 12, 206, 32), 'numpy.random.rand', 'np.random.rand', ({(206, 27, 206, 28): '(3)', (206, 30, 206, 31): '(4)'}, {}), '(3, 4)', True, 'import numpy as np\n'), ((217, 32, 217, 45), 'numpy.sum', 'np.sum', ({(217, 39, 217, 41): 'GY', (217, 43, 217, 44): '(3)'}, {}), '(GY, 3)', True, 'import numpy as np\n'), ((228, 12, 228, 38), 'numpy.random.rand', 'np.random.rand', ({(228, 27, 228, 28): '(2)', (228, 30, 228, 31): '(3)', (228, 33, 228, 34): '(4)', (228, 36, 228, 37): '(5)'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((229, 12, 229, 29), 'numpy.random.rand', 'np.random.rand', ({(229, 27, 229, 28): '(2)'}, {}), '(2)', True, 'import numpy as np\n'), ((253, 12, 253, 38), 'numpy.random.rand', 'np.random.rand', ({(253, 27, 253, 28): '(2)', (253, 30, 253, 31): '(3)', (253, 33, 253, 34): '(4)', (253, 36, 253, 37): '(5)'}, {}), '(2, 3, 4, 5)', True, 'import numpy as np\n'), ((254, 12, 254, 35), 'numpy.random.rand', 'np.random.rand', ({(254, 27, 254, 28): '(1)', (254, 30, 254, 31): '(4)', (254, 33, 254, 34): '(1)'}, {}), '(1, 4, 1)', True, 'import numpy as np\n'), ((184, 25, 184, 43), 'numpy.power', 'np.power', ({(184, 34, 184, 35): 'X', (184, 37, 184, 42): '(Y - 1)'}, {}), '(X, Y - 1)', True, 'import numpy as np\n'), ((184, 49, 184, 58), 'numpy.log', 'np.log', ({(184, 56, 184, 57): 'X'}, {}), '(X)', True, 'import numpy as np\n'), ((242, 39, 242, 52), 'numpy.sum', 'np.sum', ({(242, 46, 242, 48): 'GY', (242, 50, 242, 51): '(3)'}, {}), '(GY, 3)', True, 'import numpy as np\n'), ((292, 12, 292, 29), 'numpy.random.rand', 'np.random.rand', ({(292, 27, 292, 28): '1'}, {}), '(1)', True, 'import numpy as np\n'), ((265, 50, 265, 63), 'numpy.sum', 'np.sum', ({(265, 57, 265, 59): 'GY', (265, 61, 265, 62): '(3)'}, {}), '(GY, 3)', True, 'import numpy as np\n')]
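Note on the API calls extracted above: the originating test file is not reproduced in full in this section, but the extracted calls pair np.power(X, Y) with Y * np.power(X, Y - 1) and with np.power(X, Y) * np.log(X), which are the analytic gradients of Z = X**Y. A minimal numpy sketch that checks those identities numerically; the variable names and tolerances are illustrative and not taken from the original file.

import numpy as np

# Keep X and Y strictly positive so np.log(X) and fractional powers are defined.
X = np.random.rand(3, 4) + 0.5
Y = np.random.rand(3, 4) + 0.5
eps = 1e-6

# d/dX of X**Y is Y * X**(Y - 1); compare against a central finite difference.
analytic_dX = Y * np.power(X, Y - 1)
numeric_dX = (np.power(X + eps, Y) - np.power(X - eps, Y)) / (2 * eps)
assert np.allclose(analytic_dX, numeric_dX, atol=1e-4)

# d/dY of X**Y is X**Y * log(X); same finite-difference check.
analytic_dY = np.power(X, Y) * np.log(X)
numeric_dY = (np.power(X, Y + eps) - np.power(X, Y - eps)) / (2 * eps)
assert np.allclose(analytic_dY, numeric_dY, atol=1e-4)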
jovial/kayobe
kayobe/tests/unit/cli/test_commands.py
49e61fef4a221ee9fcfcee2b7bac02b6acc5bd0c
# Copyright (c) 2017 StackHPC Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest import cliff.app import cliff.commandmanager import mock from kayobe.cli import commands from kayobe import utils class TestApp(cliff.app.App): def __init__(self): super(TestApp, self).__init__( description='Test app', version='0.1', command_manager=cliff.commandmanager.CommandManager('kayobe.cli')) class TestCase(unittest.TestCase): @mock.patch.object(utils, "galaxy_install", spec=True) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_control_host_bootstrap(self, mock_run, mock_install): command = commands.ControlHostBootstrap(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) mock_install.assert_called_once_with("requirements.yml", "ansible/roles") expected_calls = [ mock.call(mock.ANY, ["ansible/bootstrap.yml"]), mock.call(mock.ANY, ["ansible/kolla-ansible.yml"], tags="install"), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(utils, "galaxy_install", spec=True) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_control_host_upgrade(self, mock_run, mock_install): command = commands.ControlHostUpgrade(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) mock_install.assert_called_once_with("requirements.yml", "ansible/roles", force=True) expected_calls = [ mock.call(mock.ANY, ["ansible/bootstrap.yml"]), mock.call(mock.ANY, ["ansible/kolla-ansible.yml"], tags="install"), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_network_connectivity_check(self, mock_run): command = commands.NetworkConnectivityCheck(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call(mock.ANY, ["ansible/network-connectivity.yml"]), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_seed_hypervisor_host_configure(self, mock_run, mock_dump): command = commands.SeedHypervisorHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) mock_dump.return_value = "stack" result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call(mock.ANY, host="seed-hypervisor", var_name="kayobe_ansible_user", tags="dump-config") ] self.assertEqual(expected_calls, mock_dump.call_args_list) expected_calls = [ mock.call( mock.ANY, [ "ansible/ip-allocation.yml", "ansible/ssh-known-host.yml", "ansible/kayobe-ansible-user.yml", "ansible/kayobe-target-venv.yml", "ansible/users.yml", "ansible/yum.yml", "ansible/dev-tools.yml", 
"ansible/network.yml", "ansible/sysctl.yml", "ansible/ntp.yml", "ansible/seed-hypervisor-libvirt-host.yml", ], limit="seed-hypervisor", ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_seed_hypervisor_host_upgrade(self, mock_run): command = commands.SeedHypervisorHostUpgrade(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/kayobe-target-venv.yml", "ansible/kolla-target-venv.yml", ], limit="seed-hypervisor", ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_seed") def test_seed_host_configure(self, mock_kolla_run, mock_run, mock_dump): command = commands.SeedHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) mock_dump.return_value = { "seed": {"kayobe_ansible_user": "stack"} } result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call(mock.ANY, hosts="seed", tags="dump-config") ] self.assertEqual(expected_calls, mock_dump.call_args_list) expected_calls = [ mock.call( mock.ANY, [ "ansible/ip-allocation.yml", "ansible/ssh-known-host.yml", "ansible/kayobe-ansible-user.yml", "ansible/kayobe-target-venv.yml", "ansible/users.yml", "ansible/yum.yml", "ansible/dev-tools.yml", "ansible/disable-selinux.yml", "ansible/network.yml", "ansible/sysctl.yml", "ansible/ip-routing.yml", "ansible/snat.yml", "ansible/disable-glean.yml", "ansible/ntp.yml", "ansible/lvm.yml", ], limit="seed", ), mock.call( mock.ANY, ["ansible/kolla-ansible.yml"], tags="config", ), mock.call( mock.ANY, [ "ansible/kolla-target-venv.yml", "ansible/kolla-host.yml", "ansible/docker.yml", ], limit="seed", ), ] self.assertEqual(expected_calls, mock_run.call_args_list) expected_calls = [ mock.call( mock.ANY, "bootstrap-servers", extra_vars={"ansible_user": "stack"}, ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_seed") def test_seed_host_configure_kayobe_venv(self, mock_kolla_run, mock_run, mock_dump): command = commands.SeedHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) mock_dump.return_value = { "seed": { "ansible_python_interpreter": "/kayobe/venv/bin/python", "kayobe_ansible_user": "stack", } } result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, "bootstrap-servers", extra_vars={ "ansible_python_interpreter": "/kayobe/venv/bin/python", "ansible_user": "stack", }, ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_seed") def test_seed_host_configure_kolla_venv(self, mock_kolla_run, mock_run, mock_dump): command = commands.SeedHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) 
mock_dump.return_value = { "seed": { "kayobe_ansible_user": "stack", "kolla_ansible_target_venv": "/kolla/venv/bin/python", } } result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, "bootstrap-servers", extra_vars={ "ansible_python_interpreter": "/usr/bin/python", "ansible_user": "stack", "virtualenv": "/kolla/venv/bin/python", }, ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_seed") def test_seed_host_configure_both_venvs(self, mock_kolla_run, mock_run, mock_dump): command = commands.SeedHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) mock_dump.return_value = { "seed": { "ansible_python_interpreter": "/kayobe/venv/bin/python", "kayobe_ansible_user": "stack", "kolla_ansible_target_venv": "/kolla/venv/bin/python", } } result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, "bootstrap-servers", extra_vars={ "ansible_python_interpreter": "/kayobe/venv/bin/python", "ansible_user": "stack", "virtualenv": "/kolla/venv/bin/python", }, ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_seed_host_upgrade(self, mock_run): command = commands.SeedHostUpgrade(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/kayobe-target-venv.yml", "ansible/kolla-target-venv.yml", ], limit="seed", ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_seed_container_image_build(self, mock_run): command = commands.SeedContainerImageBuild(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/container-image-builders-check.yml", "ansible/kolla-build.yml", "ansible/container-image-build.yml" ], extra_vars={ "container_image_sets": ( "{{ seed_container_image_sets }}"), "push_images": False, } ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_seed_container_image_build_with_regex(self, mock_run): command = commands.SeedContainerImageBuild(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args(["--push", "^regex1$", "^regex2$"]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/container-image-builders-check.yml", "ansible/kolla-build.yml", "ansible/container-image-build.yml" ], extra_vars={ "container_image_regexes": "'^regex1$ ^regex2$'", "push_images": True, } ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_seed") def test_service_deploy(self, mock_kolla_run, mock_run): command = commands.SeedServiceDeploy(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) 
self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, ["ansible/kolla-ansible.yml"], tags="config", ), mock.call( mock.ANY, ["ansible/kolla-bifrost.yml"], ), mock.call( mock.ANY, [ "ansible/overcloud-host-image-workaround-resolv.yml", "ansible/seed-introspection-rules.yml", "ansible/dell-switch-bmp.yml", ], ), ] self.assertEqual(expected_calls, mock_run.call_args_list) expected_calls = [ mock.call( mock.ANY, "deploy-bifrost", ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_overcloud") def test_overcloud_host_configure(self, mock_kolla_run, mock_run, mock_dump): command = commands.OvercloudHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) mock_dump.return_value = { "controller0": {"kayobe_ansible_user": "stack"} } result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call(mock.ANY, hosts="overcloud", tags="dump-config") ] self.assertEqual(expected_calls, mock_dump.call_args_list) expected_calls = [ mock.call( mock.ANY, [ "ansible/ip-allocation.yml", "ansible/ssh-known-host.yml", "ansible/kayobe-ansible-user.yml", "ansible/kayobe-target-venv.yml", "ansible/users.yml", "ansible/yum.yml", "ansible/dev-tools.yml", "ansible/disable-selinux.yml", "ansible/network.yml", "ansible/sysctl.yml", "ansible/disable-glean.yml", "ansible/disable-cloud-init.yml", "ansible/ntp.yml", "ansible/lvm.yml", ], limit="overcloud", ), mock.call( mock.ANY, ["ansible/kolla-ansible.yml"], tags="config", ), mock.call( mock.ANY, [ "ansible/kolla-target-venv.yml", "ansible/kolla-host.yml", "ansible/docker.yml", "ansible/ceph-block-devices.yml", ], limit="overcloud", ), ] self.assertEqual(expected_calls, mock_run.call_args_list) expected_calls = [ mock.call( mock.ANY, "bootstrap-servers", extra_vars={"ansible_user": "stack"}, ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_overcloud") def test_overcloud_host_configure_kayobe_venv(self, mock_kolla_run, mock_run, mock_dump): command = commands.OvercloudHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) mock_dump.return_value = { "controller0": { "ansible_python_interpreter": "/kayobe/venv/bin/python", "kayobe_ansible_user": "stack", } } result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, "bootstrap-servers", extra_vars={ "ansible_python_interpreter": "/kayobe/venv/bin/python", "ansible_user": "stack", } ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_overcloud") def test_overcloud_host_configure_kolla_venv(self, mock_kolla_run, mock_run, mock_dump): command = commands.OvercloudHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) mock_dump.return_value = { "controller0": { "kayobe_ansible_user": "stack", "kolla_ansible_target_venv": 
"/kolla/venv/bin/python", } } result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, "bootstrap-servers", extra_vars={ "ansible_python_interpreter": "/usr/bin/python", "ansible_user": "stack", "virtualenv": "/kolla/venv/bin/python", } ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_config_dump") @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") @mock.patch.object(commands.KollaAnsibleMixin, "run_kolla_ansible_overcloud") def test_overcloud_host_configure_both_venvs(self, mock_kolla_run, mock_run, mock_dump): command = commands.OvercloudHostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) mock_dump.return_value = { "controller0": { "ansible_python_interpreter": "/kayobe/venv/bin/python", "kayobe_ansible_user": "stack", "kolla_ansible_target_venv": "/kolla/venv/bin/python", } } result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, "bootstrap-servers", extra_vars={ "ansible_python_interpreter": "/kayobe/venv/bin/python", "ansible_user": "stack", "virtualenv": "/kolla/venv/bin/python", } ), ] self.assertEqual(expected_calls, mock_kolla_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_overcloud_host_upgrade(self, mock_run): command = commands.OvercloudHostUpgrade(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/kayobe-target-venv.yml", "ansible/kolla-target-venv.yml", "ansible/overcloud-docker-sdk-upgrade.yml", "ansible/overcloud-etc-hosts-fixup.yml", ], limit="overcloud", ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_overcloud_container_image_build(self, mock_run): command = commands.OvercloudContainerImageBuild(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/container-image-builders-check.yml", "ansible/kolla-build.yml", "ansible/container-image-build.yml" ], extra_vars={ "container_image_sets": ( "{{ overcloud_container_image_sets }}"), "push_images": False, } ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_overcloud_container_image_build_with_regex(self, mock_run): command = commands.OvercloudContainerImageBuild(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args(["--push", "^regex1$", "^regex2$"]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/container-image-builders-check.yml", "ansible/kolla-build.yml", "ansible/container-image-build.yml" ], extra_vars={ "container_image_regexes": "'^regex1$ ^regex2$'", "push_images": True, } ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_overcloud_post_configure(self, mock_run): command = commands.OvercloudPostConfigure(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = 
[ mock.call( mock.ANY, [ 'ansible/overcloud-ipa-images.yml', 'ansible/overcloud-introspection-rules.yml', 'ansible/overcloud-introspection-rules-dell-lldp-workaround.yml', # noqa 'ansible/provision-net.yml', 'ansible/overcloud-grafana-configure.yml' ], ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_baremetal_compute_inspect(self, mock_run): command = commands.BaremetalComputeInspect(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/baremetal-compute-inspect.yml", ], ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_baremetal_compute_manage(self, mock_run): command = commands.BaremetalComputeManage(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/baremetal-compute-manage.yml", ], ), ] self.assertEqual(expected_calls, mock_run.call_args_list) @mock.patch.object(commands.KayobeAnsibleMixin, "run_kayobe_playbooks") def test_baremetal_compute_provide(self, mock_run): command = commands.BaremetalComputeProvide(TestApp(), []) parser = command.get_parser("test") parsed_args = parser.parse_args([]) result = command.run(parsed_args) self.assertEqual(0, result) expected_calls = [ mock.call( mock.ANY, [ "ansible/baremetal-compute-provide.yml", ], ), ] self.assertEqual(expected_calls, mock_run.call_args_list)
[((36, 5, 36, 58), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((37, 5, 38, 46), 'mock.patch.object', 'mock.patch.object', ({(37, 23, 37, 50): 'commands.KayobeAnsibleMixin', (38, 23, 38, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((54, 5, 54, 58), 'mock.patch.object', 'mock.patch.object', (), '', False, 'import mock\n'), ((55, 5, 56, 46), 'mock.patch.object', 'mock.patch.object', ({(55, 23, 55, 50): 'commands.KayobeAnsibleMixin', (56, 23, 56, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((72, 5, 73, 46), 'mock.patch.object', 'mock.patch.object', ({(72, 23, 72, 50): 'commands.KayobeAnsibleMixin', (73, 23, 73, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((85, 5, 86, 48), 'mock.patch.object', 'mock.patch.object', ({(85, 23, 85, 50): 'commands.KayobeAnsibleMixin', (86, 23, 86, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((87, 5, 88, 46), 'mock.patch.object', 'mock.patch.object', ({(87, 23, 87, 50): 'commands.KayobeAnsibleMixin', (88, 23, 88, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((125, 5, 126, 46), 'mock.patch.object', 'mock.patch.object', ({(125, 23, 125, 50): 'commands.KayobeAnsibleMixin', (126, 23, 126, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((147, 5, 148, 48), 'mock.patch.object', 'mock.patch.object', ({(147, 23, 147, 50): 'commands.KayobeAnsibleMixin', (148, 23, 148, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((149, 5, 150, 46), 'mock.patch.object', 'mock.patch.object', ({(149, 23, 149, 50): 'commands.KayobeAnsibleMixin', (150, 23, 150, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((151, 5, 152, 48), 'mock.patch.object', 'mock.patch.object', ({(151, 23, 151, 49): 'commands.KollaAnsibleMixin', (152, 23, 152, 47): '"""run_kolla_ansible_seed"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_seed')", False, 'import mock\n'), ((217, 5, 218, 48), 'mock.patch.object', 'mock.patch.object', ({(217, 23, 217, 50): 'commands.KayobeAnsibleMixin', (218, 23, 218, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((219, 5, 220, 46), 'mock.patch.object', 'mock.patch.object', ({(219, 23, 219, 50): 'commands.KayobeAnsibleMixin', (220, 23, 220, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((221, 5, 222, 48), 'mock.patch.object', 'mock.patch.object', ({(221, 23, 221, 49): 'commands.KollaAnsibleMixin', (222, 23, 222, 47): '"""run_kolla_ansible_seed"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_seed')", False, 'import mock\n'), ((250, 5, 251, 48), 'mock.patch.object', 'mock.patch.object', ({(250, 23, 250, 50): 'commands.KayobeAnsibleMixin', (251, 23, 251, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((252, 5, 253, 46), 'mock.patch.object', 'mock.patch.object', ({(252, 23, 252, 50): 
'commands.KayobeAnsibleMixin', (253, 23, 253, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((254, 5, 255, 48), 'mock.patch.object', 'mock.patch.object', ({(254, 23, 254, 49): 'commands.KollaAnsibleMixin', (255, 23, 255, 47): '"""run_kolla_ansible_seed"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_seed')", False, 'import mock\n'), ((284, 5, 285, 48), 'mock.patch.object', 'mock.patch.object', ({(284, 23, 284, 50): 'commands.KayobeAnsibleMixin', (285, 23, 285, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((286, 5, 287, 46), 'mock.patch.object', 'mock.patch.object', ({(286, 23, 286, 50): 'commands.KayobeAnsibleMixin', (287, 23, 287, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((288, 5, 289, 48), 'mock.patch.object', 'mock.patch.object', ({(288, 23, 288, 49): 'commands.KollaAnsibleMixin', (289, 23, 289, 47): '"""run_kolla_ansible_seed"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_seed')", False, 'import mock\n'), ((319, 5, 320, 46), 'mock.patch.object', 'mock.patch.object', ({(319, 23, 319, 50): 'commands.KayobeAnsibleMixin', (320, 23, 320, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((341, 5, 342, 46), 'mock.patch.object', 'mock.patch.object', ({(341, 23, 341, 50): 'commands.KayobeAnsibleMixin', (342, 23, 342, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((366, 5, 367, 46), 'mock.patch.object', 'mock.patch.object', ({(366, 23, 366, 50): 'commands.KayobeAnsibleMixin', (367, 23, 367, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((390, 5, 391, 46), 'mock.patch.object', 'mock.patch.object', ({(390, 23, 390, 50): 'commands.KayobeAnsibleMixin', (391, 23, 391, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((392, 5, 393, 48), 'mock.patch.object', 'mock.patch.object', ({(392, 23, 392, 49): 'commands.KollaAnsibleMixin', (393, 23, 393, 47): '"""run_kolla_ansible_seed"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_seed')", False, 'import mock\n'), ((431, 5, 432, 48), 'mock.patch.object', 'mock.patch.object', ({(431, 23, 431, 50): 'commands.KayobeAnsibleMixin', (432, 23, 432, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((433, 5, 434, 46), 'mock.patch.object', 'mock.patch.object', ({(433, 23, 433, 50): 'commands.KayobeAnsibleMixin', (434, 23, 434, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((435, 5, 436, 53), 'mock.patch.object', 'mock.patch.object', ({(435, 23, 435, 49): 'commands.KollaAnsibleMixin', (436, 23, 436, 52): '"""run_kolla_ansible_overcloud"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_overcloud')", False, 'import mock\n'), ((502, 5, 503, 48), 'mock.patch.object', 'mock.patch.object', ({(502, 23, 502, 50): 'commands.KayobeAnsibleMixin', (503, 23, 503, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((504, 5, 505, 46), 'mock.patch.object', 'mock.patch.object', 
({(504, 23, 504, 50): 'commands.KayobeAnsibleMixin', (505, 23, 505, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((506, 5, 507, 53), 'mock.patch.object', 'mock.patch.object', ({(506, 23, 506, 49): 'commands.KollaAnsibleMixin', (507, 23, 507, 52): '"""run_kolla_ansible_overcloud"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_overcloud')", False, 'import mock\n'), ((535, 5, 536, 48), 'mock.patch.object', 'mock.patch.object', ({(535, 23, 535, 50): 'commands.KayobeAnsibleMixin', (536, 23, 536, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((537, 5, 538, 46), 'mock.patch.object', 'mock.patch.object', ({(537, 23, 537, 50): 'commands.KayobeAnsibleMixin', (538, 23, 538, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((539, 5, 540, 53), 'mock.patch.object', 'mock.patch.object', ({(539, 23, 539, 49): 'commands.KollaAnsibleMixin', (540, 23, 540, 52): '"""run_kolla_ansible_overcloud"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_overcloud')", False, 'import mock\n'), ((569, 5, 570, 48), 'mock.patch.object', 'mock.patch.object', ({(569, 23, 569, 50): 'commands.KayobeAnsibleMixin', (570, 23, 570, 47): '"""run_kayobe_config_dump"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_config_dump')", False, 'import mock\n'), ((571, 5, 572, 46), 'mock.patch.object', 'mock.patch.object', ({(571, 23, 571, 50): 'commands.KayobeAnsibleMixin', (572, 23, 572, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((573, 5, 574, 53), 'mock.patch.object', 'mock.patch.object', ({(573, 23, 573, 49): 'commands.KollaAnsibleMixin', (574, 23, 574, 52): '"""run_kolla_ansible_overcloud"""'}, {}), "(commands.KollaAnsibleMixin, 'run_kolla_ansible_overcloud')", False, 'import mock\n'), ((604, 5, 605, 46), 'mock.patch.object', 'mock.patch.object', ({(604, 23, 604, 50): 'commands.KayobeAnsibleMixin', (605, 23, 605, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((628, 5, 629, 46), 'mock.patch.object', 'mock.patch.object', ({(628, 23, 628, 50): 'commands.KayobeAnsibleMixin', (629, 23, 629, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((653, 5, 654, 46), 'mock.patch.object', 'mock.patch.object', ({(653, 23, 653, 50): 'commands.KayobeAnsibleMixin', (654, 23, 654, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((677, 5, 678, 46), 'mock.patch.object', 'mock.patch.object', ({(677, 23, 677, 50): 'commands.KayobeAnsibleMixin', (678, 23, 678, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((701, 5, 702, 46), 'mock.patch.object', 'mock.patch.object', ({(701, 23, 701, 50): 'commands.KayobeAnsibleMixin', (702, 23, 702, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((719, 5, 720, 46), 'mock.patch.object', 'mock.patch.object', ({(719, 23, 719, 50): 'commands.KayobeAnsibleMixin', (720, 23, 720, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((737, 5, 738, 46), 
'mock.patch.object', 'mock.patch.object', ({(737, 23, 737, 50): 'commands.KayobeAnsibleMixin', (738, 23, 738, 45): '"""run_kayobe_playbooks"""'}, {}), "(commands.KayobeAnsibleMixin, 'run_kayobe_playbooks')", False, 'import mock\n'), ((48, 12, 48, 58), 'mock.call', 'mock.call', ({(48, 22, 48, 30): 'mock.ANY', (48, 32, 48, 57): "['ansible/bootstrap.yml']"}, {}), "(mock.ANY, ['ansible/bootstrap.yml'])", False, 'import mock\n'), ((49, 12, 50, 37), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((66, 12, 66, 58), 'mock.call', 'mock.call', ({(66, 22, 66, 30): 'mock.ANY', (66, 32, 66, 57): "['ansible/bootstrap.yml']"}, {}), "(mock.ANY, ['ansible/bootstrap.yml'])", False, 'import mock\n'), ((67, 12, 68, 37), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((81, 12, 81, 69), 'mock.call', 'mock.call', ({(81, 22, 81, 30): 'mock.ANY', (81, 32, 81, 68): "['ansible/network-connectivity.yml']"}, {}), "(mock.ANY, ['ansible/network-connectivity.yml'])", False, 'import mock\n'), ((99, 12, 100, 73), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((105, 12, 121, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((136, 12, 143, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((165, 12, 165, 65), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((170, 12, 190, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((191, 12, 195, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((196, 12, 204, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((209, 12, 213, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((239, 12, 246, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((272, 12, 280, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((307, 12, 315, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((330, 12, 337, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((350, 12, 362, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((375, 12, 386, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((403, 12, 407, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((408, 12, 411, 13), 'mock.call', 'mock.call', ({(409, 16, 409, 24): 'mock.ANY', (410, 16, 410, 45): "['ansible/kolla-bifrost.yml']"}, {}), "(mock.ANY, ['ansible/kolla-bifrost.yml'])", False, 'import mock\n'), ((412, 12, 419, 13), 'mock.call', 'mock.call', ({(413, 16, 413, 24): 'mock.ANY', (414, 16, 418, 17): "['ansible/overcloud-host-image-workaround-resolv.yml',\n 'ansible/seed-introspection-rules.yml', 'ansible/dell-switch-bmp.yml']"}, {}), "(mock.ANY, ['ansible/overcloud-host-image-workaround-resolv.yml',\n 'ansible/seed-introspection-rules.yml', 'ansible/dell-switch-bmp.yml'])", False, 'import mock\n'), ((424, 12, 427, 13), 'mock.call', 'mock.call', ({(425, 16, 425, 24): 'mock.ANY', (426, 16, 426, 32): '"""deploy-bifrost"""'}, {}), "(mock.ANY, 'deploy-bifrost')", False, 'import mock\n'), ((450, 12, 450, 70), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((455, 12, 474, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((475, 12, 479, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((480, 12, 489, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((494, 12, 498, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((524, 12, 531, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((557, 12, 565, 13), 'mock.call', 'mock.call', (), '', False, 'import 
mock\n'), ((592, 12, 600, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((615, 12, 624, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((637, 12, 649, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((662, 12, 673, 13), 'mock.call', 'mock.call', (), '', False, 'import mock\n'), ((688, 12, 697, 13), 'mock.call', 'mock.call', ({(689, 16, 689, 24): 'mock.ANY', (690, 16, 696, 17): "['ansible/overcloud-ipa-images.yml',\n 'ansible/overcloud-introspection-rules.yml',\n 'ansible/overcloud-introspection-rules-dell-lldp-workaround.yml',\n 'ansible/provision-net.yml', 'ansible/overcloud-grafana-configure.yml']"}, {}), "(mock.ANY, ['ansible/overcloud-ipa-images.yml',\n 'ansible/overcloud-introspection-rules.yml',\n 'ansible/overcloud-introspection-rules-dell-lldp-workaround.yml',\n 'ansible/provision-net.yml', 'ansible/overcloud-grafana-configure.yml'])", False, 'import mock\n'), ((710, 12, 715, 13), 'mock.call', 'mock.call', ({(711, 16, 711, 24): 'mock.ANY', (712, 16, 714, 17): "['ansible/baremetal-compute-inspect.yml']"}, {}), "(mock.ANY, ['ansible/baremetal-compute-inspect.yml'])", False, 'import mock\n'), ((728, 12, 733, 13), 'mock.call', 'mock.call', ({(729, 16, 729, 24): 'mock.ANY', (730, 16, 732, 17): "['ansible/baremetal-compute-manage.yml']"}, {}), "(mock.ANY, ['ansible/baremetal-compute-manage.yml'])", False, 'import mock\n'), ((746, 12, 751, 13), 'mock.call', 'mock.call', ({(747, 16, 747, 24): 'mock.ANY', (748, 16, 750, 17): "['ansible/baremetal-compute-provide.yml']"}, {}), "(mock.ANY, ['ansible/baremetal-compute-provide.yml'])", False, 'import mock\n')]
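The kayobe test module above leans on one pattern throughout: patch a collaborator with mock.patch.object, run the command, and compare a list of mock.call(...) entries against call_args_list. A small self-contained sketch of that pattern using only the standard unittest/mock API; the Runner class and its methods are invented for illustration and are not part of kayobe.

import unittest
from unittest import mock


class Runner:
    def run_playbooks(self, playbooks, limit=None):
        # The real behaviour is irrelevant: the test patches this method.
        raise NotImplementedError

    def deploy(self):
        self.run_playbooks(["bootstrap.yml"])
        self.run_playbooks(["deploy.yml"], limit="seed")


class TestRunner(unittest.TestCase):
    # The patched mock is passed to the test as the last argument.
    @mock.patch.object(Runner, "run_playbooks")
    def test_deploy(self, mock_run):
        Runner().deploy()
        expected_calls = [
            mock.call(["bootstrap.yml"]),
            mock.call(["deploy.yml"], limit="seed"),
        ]
        # call_args_list compares equal to a plain list of mock.call objects.
        self.assertEqual(expected_calls, mock_run.call_args_list)


if __name__ == "__main__":
    unittest.main()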
CNugteren/keras-onnx
keras2onnx/proto/tfcompat.py
b3d6b6486fe56640c48c62dd098e9405e35b4e9f
############################################################################### # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. ############################################################################### import os import tensorflow as _tf from distutils.version import StrictVersion is_tf2 = StrictVersion(_tf.__version__.split('-')[0]) >= StrictVersion('2.0.0') def normalize_tensor_shape(tensor_shape): if is_tf2: return [d for d in tensor_shape] else: return [d.value for d in tensor_shape] def dump_graph_into_tensorboard(tf_graph): # type: (_tf.Graph) -> None _tb_log_dir = os.environ.get('TB_LOG_DIR') if _tb_log_dir: if is_tf2: from tensorflow.python.ops.summary_ops_v2 import graph as write_graph pb_visual_writer = _tf.summary.create_file_writer(_tb_log_dir) with pb_visual_writer.as_default(): write_graph(tf_graph) else: from tensorflow.python.summary import summary pb_visual_writer = summary.FileWriter(_tb_log_dir) pb_visual_writer.add_graph(tf_graph) if is_tf2: tensorflow = _tf.compat.v1 def is_subclassed(layer): """Returns True if the object is a subclassed layer or subclassed model.""" return (layer.__module__.find('keras.engine') == -1 and layer.__module__.find('keras.layers') == -1) else: tensorflow = _tf def is_subclassed(layer): return False
[((11, 57, 11, 79), 'distutils.version.StrictVersion', 'StrictVersion', ({(11, 71, 11, 78): '"""2.0.0"""'}, {}), "('2.0.0')", False, 'from distutils.version import StrictVersion\n'), ((23, 18, 23, 46), 'os.environ.get', 'os.environ.get', ({(23, 33, 23, 45): '"""TB_LOG_DIR"""'}, {}), "('TB_LOG_DIR')", False, 'import os\n'), ((11, 23, 11, 49), 'tensorflow.__version__.split', '_tf.__version__.split', ({(11, 45, 11, 48): '"""-"""'}, {}), "('-')", True, 'import tensorflow as _tf\n'), ((27, 31, 27, 74), 'tensorflow.summary.create_file_writer', '_tf.summary.create_file_writer', ({(27, 62, 27, 73): '_tb_log_dir'}, {}), '(_tb_log_dir)', True, 'import tensorflow as _tf\n'), ((32, 31, 32, 62), 'tensorflow.python.summary.summary.FileWriter', 'summary.FileWriter', ({(32, 50, 32, 61): '_tb_log_dir'}, {}), '(_tb_log_dir)', False, 'from tensorflow.python.summary import summary\n'), ((29, 16, 29, 37), 'tensorflow.python.ops.summary_ops_v2.graph', 'write_graph', ({(29, 28, 29, 36): 'tf_graph'}, {}), '(tf_graph)', True, 'from tensorflow.python.ops.summary_ops_v2 import graph as write_graph\n')]
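tfcompat.py above gates its behaviour on a TensorFlow 1.x versus 2.x check built from distutils.version.StrictVersion. A hedged, TensorFlow-free sketch of just that check: StrictVersion only parses plain X.Y.Z strings, which is why the module splits on '-' first. Note that distutils is deprecated and removed in Python 3.12, where packaging.version would be the usual substitute.

from distutils.version import StrictVersion


def is_at_least_2(version_string):
    # Drop any "-rc0"-style suffix before comparing, as the module above does.
    return StrictVersion(version_string.split('-')[0]) >= StrictVersion('2.0.0')


print(is_at_least_2('1.15.0'))     # False
print(is_at_least_2('2.4.1'))      # True
print(is_at_least_2('2.0.0-rc1'))  # True, the suffix is stripped first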
excentis/ByteBlower_python_examples
back2back/httpmulticlient.py
0e082e17413abf5e25f6d14b85e50e7f73e7f965
""" HTTP MultiServer/MultiClient for the ByteBlower Python API. All examples are guaranteed to work with Python 2.7 and above Copyright 2018, Excentis N.V. """ # Needed for python2 / python3 print function compatibility from __future__ import print_function # import the ByteBlower module import byteblowerll.byteblower as byteblower import time configuration = { # Address (IP or FQDN) of the ByteBlower server to use 'server_address': 'byteblower-tp-1300.lab.byteblower.excentis.com', # Configuration for the first ByteBlower port. # Will be used as HTTP server. 'port_1_config': { 'interface': 'trunk-1-13', 'mac': '00:bb:01:00:00:01', # IP configuration for the ByteBlower Port. # Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static' # if DHCPv4, use "dhcpv4" 'ip': 'dhcpv4', # if DHCPv6, use "dhcpv6" # 'ip': 'dhcpv6', # if SLAAC, use "slaac" # 'ip': 'slaac', # if staticv4, use ["ipaddress", netmask, gateway] # 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"], # if staticv6, use ["ipaddress", prefixlength] # 'ip': ['3000:3128::24', '64'], # TCP port number to be used by the HTTP connection. # On the HTTP server, this will be the port on which the server # listens. 'tcp_port': 4096 }, # Configuration for the second ByteBlower port. # Will be used as HTTP client. 'port_2_config': { 'interface': 'trunk-1-25', 'mac': '00:bb:01:00:00:02', # IP configuration for the ByteBlower Port. # Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static' # if DHCPv4, use "dhcpv4" 'ip': 'dhcpv4', # if DHCPv6, use "dhcpv6" # ip': 'dhcpv6', # if SLAAC, use "slaac" # 'ip': 'slaac', # if staticv4, use ["ipaddress", netmask, gateway] # 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"], # if staticv6, use ["ipaddress", prefixlength] # 'ip': ['3000:3128::24', '64'], # TCP port range the HTTP Clients will use to connect with # the HTTP server 'tcp_port_min': 32000, 'tcp_port_max': 50000 }, # HTTP Method # HTTP Method can be GET or PUT # - GET: Standard HTTP download, we retrieve data from the web server # - PUT: Standard HTTP upload, the wireless endpoint will push data to the # webserver 'http_method': 'GET', # 'http_method': 'PUT', # total duration, in nanoseconds. # This is the duration of the flow. When this duration expires, # all sessions will be stopped. 
'duration': 10000000000, # session duration, in nanoseconds # Duration of the individual sessions # 'session_duration': 1500000000, 'session_duration': None, # session size, in bytes # The number of bytes transmitted by a session 'session_size': 1 * 1000 * 1000, # 'session_size': None, # max concurrent sessions # Maximum number of sessions that will be running simultaneously 'max_concurrent_sessions': 100, # maximum number of sessions # No more than this number of sessions will be created # 0 means no limit 'max_total_sessions': 0, # TOS value to use on the HTTP client (and server) 'tos': 0 } class Example: def __init__(self, **kwargs): self.server_address = kwargs['server_address'] self.port_1_config = kwargs['port_1_config'] self.port_2_config = kwargs['port_2_config'] # Helper function, we can use this to parse the HTTP Method to the # enumeration used by the API from byteblowerll.byteblower import ParseHTTPRequestMethodFromString http_method_arg = kwargs['http_method'] self.http_method = ParseHTTPRequestMethodFromString(http_method_arg) self.duration = kwargs['duration'] self.session_duration = kwargs['session_duration'] self.session_size = kwargs['session_size'] self.max_concurrent_sessions = kwargs['max_concurrent_sessions'] self.max_total_sessions = kwargs['max_total_sessions'] self.tos = kwargs['tos'] self.server = None self.port_1 = None self.port_2 = None def cleanup(self): """Clean up the created objects""" byteblower_instance = byteblower.ByteBlower.InstanceGet() if self.port_1: self.server.PortDestroy(self.port_1) self.port_1 = None if self.port_2: self.server.PortDestroy(self.port_2) self.port_2 = None if self.server is not None: byteblower_instance.ServerRemove(self.server) self.server = None def run(self): byteblower_instance = byteblower.ByteBlower.InstanceGet() print("Connecting to ByteBlower server %s..." % self.server_address) self.server = byteblower_instance.ServerAdd(self.server_address) # Create the port which will be the HTTP server (port_1) print("Creating HTTP Server port") self.port_1 = self.provision_port(self.port_1_config) print("Creating HTTP Client port") # Create the port which will be the HTTP client (port_2) self.port_2 = self.provision_port(self.port_2_config) http_server_ip_address = self.port_1_config['ip_address'] # create a HTTP server http_server = self.port_1.ProtocolHttpMultiServerAdd() server_tcp_port = self.port_1_config['tcp_port'] if server_tcp_port is not None: http_server.PortSet(server_tcp_port) else: server_tcp_port = http_server.PortGet() # create a HTTP Client http_client = self.port_2.ProtocolHttpMultiClientAdd() # - remote endpoint http_client.RemoteAddressSet(http_server_ip_address) http_client.RemotePortSet(server_tcp_port) # - local endpoint http_client.LocalPortRangeSet(self.port_2_config['tcp_port_min'], self.port_2_config['tcp_port_max']) # Configure the direction. # If the HTTP Method is GET, # traffic will flow from the HTTP server to the HTTP client # If the HTTP Method is PUT, # traffic will flow from the HTTP client to the HTTP server http_client.HttpMethodSet(self.http_method) print("Server port:", self.port_1.DescriptionGet()) print("Client port:", self.port_2.DescriptionGet()) # let the HTTP server listen for requests http_server.Start() # - total duration of all sessions http_client.DurationSet(self.duration) # - how many connections can be created? 
http_client.CumulativeConnectionLimitSet(self.max_total_sessions) # - how many connections can be running at the same time http_client.MaximumConcurrentRequestsSet(self.max_concurrent_sessions) # - individual duration, can be size-based or time-based if self.session_duration is not None: # let the HTTP Client request a page of a specific duration # to download... http_client.SessionDurationSet(self.session_duration) elif self.session_size is not None: # let the HTTP Client request a page of a specific size... http_client.SessionSizeSet(self.session_size) else: raise ValueError("Either duration or request_size must be configured") print("Starting the HTTP client") http_client.Start() http_client_result = http_client.ResultGet() for iteration in range(10): time.sleep(1) http_client_result.Refresh() print("-" * 10) print("Iteration", iteration+1) print(" connections attempted", http_client_result.ConnectionsAttemptedGet()) print(" connections established", http_client_result.ConnectionsEstablishedGet()) print(" connections aborted", http_client_result.ConnectionsAbortedGet()) print(" connections refused", http_client_result.ConnectionsRefusedGet()) print("-" * 10) http_client.Stop() http_server.Stop() print("Stopped the HTTP client") request_status_value = http_client.StatusGet() request_status_string = byteblower.ConvertHTTPMultiClientStatusToString(request_status_value) http_client_result.Refresh() tx_bytes = http_client_result.TcpTxByteCountGet() tx_speed = http_client_result.TcpTxSpeedGet() rx_bytes = http_client_result.TcpRxByteCountGet() rx_speed = http_client_result.TcpRxSpeedGet() http_server_result = http_server.ResultGet() http_server_result.Refresh() print("Requested Duration : {} nanoseconds".format(self.duration)) print("Status : {}".format(request_status_string)) print("Client Result data : {}".format(http_client_result.DescriptionGet())) print("Server Result data : {}".format(http_server_result.DescriptionGet())) return [ self.duration, self.session_duration, self.session_size, self.max_total_sessions, self.max_concurrent_sessions, tx_bytes, rx_bytes, tx_speed, rx_speed, request_status_value ] def provision_port(self, config): port = self.server.PortCreate(config['interface']) port_l2 = port.Layer2EthIISet() port_l2.MacSet(config['mac']) ip_config = config['ip'] if not isinstance(ip_config, list): # Config is not static, DHCP or slaac if ip_config.lower() == "dhcpv4": port_l3 = port.Layer3IPv4Set() port_l3.ProtocolDhcpGet().Perform() config['ip_address'] = port_l3.IpGet() elif ip_config.lower() == "dhcpv6": port_l3 = port.Layer3IPv6Set() port_l3.ProtocolDhcpGet().Perform() config['ip_address'] = port_l3.IpDhcpGet() elif ip_config.lower() == "slaac": port_l3 = port.Layer3IPv6Set() port_l3.StatelessAutoconfiguration() config['ip_address'] = port_l3.IpStatelessGet() else: # Static configuration if len(ip_config) == 3: # IPv4 port_l3 = port.Layer3IPv4Set() port_l3.IpSet(ip_config[0]) port_l3.NetmaskSet(ip_config[1]) port_l3.GatewaySet(ip_config[2]) config['ip_address'] = port_l3.IpGet() elif len(ip_config) == 2: port_l3 = port.Layer3IPv6Set() # IPv6 address = ip_config[0] prefix_length = ip_config[1] ip = "{}/{}".format(address, prefix_length) port_l3.IpManualAdd(ip) config['ip_address'] = ip_config[0] if not isinstance(config['ip_address'], str): ip = config['ip_address'][0] if '/' in ip: config['ip_address'] = ip.split('/')[0] print("Created port", port.DescriptionGet()) return port # When this python module is called stand-alone, the run-function must be # called. 
This approach makes it possible to include it in a series of # examples. if __name__ == "__main__": example = Example(**configuration) try: example.run() finally: example.cleanup()
[((116, 27, 116, 76), 'byteblowerll.byteblower.ParseHTTPRequestMethodFromString', 'ParseHTTPRequestMethodFromString', ({(116, 60, 116, 75): 'http_method_arg'}, {}), '(http_method_arg)', False, 'from byteblowerll.byteblower import ParseHTTPRequestMethodFromString\n'), ((130, 30, 130, 65), 'byteblowerll.byteblower.ByteBlower.InstanceGet', 'byteblower.ByteBlower.InstanceGet', ({}, {}), '()', True, 'import byteblowerll.byteblower as byteblower\n'), ((144, 30, 144, 65), 'byteblowerll.byteblower.ByteBlower.InstanceGet', 'byteblower.ByteBlower.InstanceGet', ({}, {}), '()', True, 'import byteblowerll.byteblower as byteblower\n'), ((234, 32, 234, 101), 'byteblowerll.byteblower.ConvertHTTPMultiClientStatusToString', 'byteblower.ConvertHTTPMultiClientStatusToString', ({(234, 80, 234, 100): 'request_status_value'}, {}), '(request_status_value)', True, 'import byteblowerll.byteblower as byteblower\n'), ((217, 12, 217, 25), 'time.sleep', 'time.sleep', ({(217, 23, 217, 24): '(1)'}, {}), '(1)', False, 'import time\n')]
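The configuration block and provision_port() in the ByteBlower example above encode the address setup as either a protocol name string or a static-address list. A pure-Python sketch of just that dispatch, with no ByteBlower objects involved; the function name is made up for illustration.

def classify_ip_config(ip_config):
    # Strings select an address-assignment protocol.
    if not isinstance(ip_config, list):
        return ip_config.lower()  # 'dhcpv4', 'dhcpv6' or 'slaac'
    # Lists describe a static address, as in the example's provision_port().
    if len(ip_config) == 3:
        return 'static IPv4 (address, netmask, gateway)'
    if len(ip_config) == 2:
        return 'static IPv6 (address, prefix length)'
    raise ValueError('unsupported ip configuration: %r' % (ip_config,))


print(classify_ip_config('dhcpv4'))
print(classify_ip_config(['192.168.0.2', '255.255.255.0', '192.168.0.1']))
print(classify_ip_config(['3000:3128::24', '64']))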
24-timmarsseglingarna/app
tools/pod-xml-to-geojson.py
0c028bd2eb284c6893cb16dd91bd093b2222338f
#!/usr/bin/env python # Converts a PoD XML file to a GeoJSON file. # # With the --javascript parameter, the generated file is a javascript # file defining a variable 'basePodSpec'. # # Get the PoD XML file from http://dev.24-timmars.nu/PoD/xmlapi_app.php. import xml.etree.ElementTree as etree import argparse import re import json import io import sys import os.path import datetime if sys.version < '3': import codecs # points number 9000 and above are not real points; they are used to mark # area borders MAXPOINT=8999 def run(): parser = argparse.ArgumentParser() parser.add_argument("-i", "--infile", help="input file") parser.add_argument("-o", "--outfile", help="output file") parser.add_argument("--id", help="id of terrain") parser.add_argument("--javascript", action="store_true") args = parser.parse_args() tree = etree.parse(args.infile) all_points, start_points, turning_points = get_points(tree) inshore_legs, offshore_legs = get_legs(tree, all_points) output_pod(args.outfile, args.javascript, args.id, [('startPoints', start_points), ('turningPoints', turning_points), ('inshoreLegs', inshore_legs), ('offshoreLegs', offshore_legs)]) def output_pod(fname, javascript, id, features): if sys.version < '3': fd = codecs.open(fname, "w", encoding="utf-8") else: fd = io.open(fname, "w", encoding="utf-8") if javascript: fd.write(u'/* eslint-disable */\n') fd.write(u'export var basePodSpec = ') fd.write(u'{"id": %s, ' % id) flen = len(features) i = 1 for (name, obj) in features: fd.write(u'"%s": {"type": "FeatureCollection",' '"crs": { "type": "name",' '"properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },' '"features":' % name) fd.write(json.dumps(obj, ensure_ascii=False)) if i == flen: fd.write(u'}') else: i = i + 1 fd.write(u'},\n') if javascript: fd.write(u'};\n') else: fd.write(u'}\n') def get_points(tree): doc = tree.getroot() startnumbers = {} all_points = {} start_points = [] turning_points = [] for n in doc.findall("kretsar/krets/startpoints/number"): startnumbers[n.text] = True for p in doc.findall("points/point"): number = p.find("number").text if int(number) > MAXPOINT: continue name = p.find("name").text descr = p.find("descr").text lat = p.find("lat").text lng = p.find("long").text footnote = None footnoteelem = p.find("footnote") if footnoteelem is not None: footnote = footnoteelem.text properties = {"number": number, "name": name, "descr": descr} if footnote != None: properties["footnote"] = footnote coordinates = [float(lng), float(lat)] geometry = {"type": "Point", "coordinates": coordinates} point = {"type": "Feature", "properties": properties, "geometry": geometry}, if number in startnumbers: start_points.extend(point) else: turning_points.extend(point) all_points[number] = coordinates return all_points, start_points, turning_points def get_legs(tree, all_points): doc = tree.getroot() coast = [] offshore = [] for p in doc.findall("legs/leg"): src = p.find("from").text dst = p.find("to").text if int(src) > MAXPOINT or int(dst) > MAXPOINT: continue if int(src) < int(dst): # since all legs are present twice (in both directions), # skip one direction continue dist = p.find("dist").text sea = p.find("sea").text addtime = p.find("addtime").text if dist is None: print("** error: no distance: src: %s dst: %s" % (src, dst)) properties = {"src": src, "dst": dst, "dist": float(dist)} if properties["dist"] == 0 and addtime == "1": properties["addtime"] = True; src_coords = all_points[src] dst_coords = all_points[dst] geometry = {"type": "LineString", "coordinates": [src_coords, 
dst_coords]} leg = {"type": "Feature", "properties": properties, "geometry": geometry}, if sea == "0": coast.extend(leg) else: offshore.extend(leg) return coast, offshore if __name__ == '__main__': run()
[((26, 13, 26, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((32, 11, 32, 35), 'xml.etree.ElementTree.parse', 'etree.parse', ({(32, 23, 32, 34): 'args.infile'}, {}), '(args.infile)', True, 'import xml.etree.ElementTree as etree\n'), ((45, 13, 45, 54), 'codecs.open', 'codecs.open', (), '', False, 'import codecs\n'), ((47, 13, 47, 50), 'io.open', 'io.open', (), '', False, 'import io\n'), ((59, 17, 59, 52), 'json.dumps', 'json.dumps', (), '', False, 'import json\n')]
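get_points() in the PoD converter above assembles plain dictionaries in GeoJSON Feature form. A minimal standard-library sketch of one such feature, using made-up values; GeoJSON stores coordinates as [longitude, latitude].

import json

# Illustrative values only; real data comes from the PoD XML points.
properties = {"number": "101", "name": "Example point", "descr": "example"}
feature = {
    "type": "Feature",
    "properties": properties,
    "geometry": {"type": "Point", "coordinates": [18.07, 59.33]},
}
print(json.dumps(feature, ensure_ascii=False))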
carderne/raster-vision
rastervision/plugin.py
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
import os import json import importlib from pluginbase import PluginBase import rastervision as rv from rastervision.protos.plugin_pb2 import PluginConfig as PluginConfigMsg from rastervision.utils.files import download_if_needed class PluginError(Exception): pass def load_conf_list(s): """Loads a list of items from the config. Lists should be comma separated. This takes into account that previous versions of Raster Vision allowed for a `[ "module" ]` like syntax, even though that didn't work for multi-value lists. """ try: # A comma separated list of values will be transformed to # having a list-like string, with ' instead of ". Replacing # single quotes with double quotes lets us parse it as a JSON list. return json.loads(s.replace("'", '"')) except json.JSONDecodeError: return list(map(lambda x: x.strip(), s.split(','))) class PluginRegistry: @staticmethod def get_instance(): return rv._registry._get_plugin_registry() def __init__(self, plugin_config, rv_home): """Initializes this plugin registry. A plugin registry is passed to plugins in a call to their "register_plugin" method. Args: plugin_config - the everett ConfigManager for the plugin section of the application configuration. """ self.plugin_root_dir = os.path.join(rv_home, 'plugins') self.config_builders = {} self.command_config_builders = {} self.commands = [] self.aux_command_classes = {} self.default_raster_sources = [] self.default_vector_sources = [] self.default_label_sources = [] self.default_label_stores = [] self.default_evaluators = [] self.experiment_runners = {} self.filesystems = [] plugin_files = load_conf_list(plugin_config('files', default='[]')) self._load_from_files(plugin_files) self.plugin_files = plugin_files plugin_modules = load_conf_list(plugin_config('modules', default='[]')) self._load_from_modules(plugin_modules) self.plugin_modules = plugin_modules def _load_plugin(self, plugin, identifier): # Check the plugin is valid if not hasattr(plugin, 'register_plugin'): raise PluginError('Plugin at {} does not have ' '"register_plugin" method.'.format(identifier)) register_method = getattr(plugin, 'register_plugin') if not callable(register_method): raise PluginError('Plugin at {} has a ' '"register_plugin" attribute, ' 'but it is not callable'.format(identifier)) # TODO: Log loading plugin. register_method(self) def _load_from_files(self, plugin_paths): if not plugin_paths: return self.plugin_sources = [] plugin_base = PluginBase(package='rastervision.plugins') for uri in plugin_paths: plugin_name = os.path.splitext(os.path.basename(uri))[0] plugin_path = os.path.join(self.plugin_root_dir, plugin_name) fs = rv._registry.get_file_system(uri, search_plugins=False) local_path = download_if_needed(uri, plugin_path, fs=fs) local_dir = os.path.dirname(local_path) plugin_source = plugin_base.make_plugin_source( searchpath=[local_dir]) # We're required to hang onto the source # to keep it from getting GC'd. 
self.plugin_sources.append(plugin_source) self._load_plugin(plugin_source.load_plugin(plugin_name), uri) def _load_from_modules(self, plugin_modules): if not plugin_modules: return for module in plugin_modules: plugin = importlib.import_module(module) self._load_plugin(plugin, module) def add_plugins_from_proto(self, plugin_msg): new_plugin_files = list( set(plugin_msg.plugin_uris) - set(self.plugin_files)) self._load_from_files(new_plugin_files) self.plugin_files.extend(new_plugin_files) new_plugin_modules = list( set(plugin_msg.plugin_modules) - set(self.plugin_modules)) self._load_from_modules(new_plugin_modules) self.plugin_modules.extend(new_plugin_modules) def to_proto(self): """Returns a protobuf message that records the plugin sources for plugins that are currently loaded in the registry. """ return PluginConfigMsg( plugin_uris=self.plugin_files, plugin_modules=self.plugin_modules) def register_config_builder(self, group, key, builder_class): """Registers a ConfigBuilder as a plugin. Args: group - The Config group, e.g. rv.BACKEND, rv.TASK. key - The key used for this plugin. This will be used to construct the builder in a ".builder(key)" call. builder_class - The subclass of ConfigBuilder that builds the Config for this plugin. """ if (group, key) in self.config_builders: raise PluginError('ConfigBuilder already registered for group ' '{} and key {}'.format(group, key)) self.config_builders[(group, key)] = builder_class def register_command_config_builder(self, command_type, builder_class): """Registers a ConfigBuilder as a plugin. Args: command_type - The key used for this plugin. This will be used to construct the builder in a ".builder(key)" call. builder_class - The subclass of CommandConfigBuilder that builds the CommandConfig for this plugin. """ if command_type in self.command_config_builders: raise PluginError( 'CommandConfigBuilder already registered for command' 'with type {}'.format(command_type)) self.command_config_builders[command_type] = builder_class self.commands.append(command_type) def register_aux_command(self, command_type, command_class): """Registers a custom AuxCommand as a plugin. Args: command_type - The key used for this plugin. This will be used to construct the builder in a ".builder(key)" call. command_class - The subclass of AuxCommand subclass to register. 
""" if command_type in self.command_config_builders: raise PluginError( 'CommandConfigBuilder is already registered for command' 'with type {}'.format(command_type)) if command_type in self.aux_command_classes: raise PluginError('AuxCommand is already registered for command' 'with type {}'.format(command_type)) self.aux_command_classes[command_type] = command_class if command_class.options.include_by_default: self.commands.append(command_type) def register_default_raster_source(self, provider_class): """Registers a RasterSourceDefaultProvider for use as a plugin.""" self.default_raster_sources.append(provider_class) def register_default_vector_source(self, provider_class): """Registers a VectorSourceDefaultProvider for use as a plugin.""" self.default_vector_sources.append(provider_class) def register_default_label_source(self, provider_class): """Registers a LabelSourceDefaultProvider for use as a plugin.""" self.default_label_sources.append(provider_class) def register_default_label_store(self, provider_class): """Registers a LabelStoreDefaultProvider for use as a plugin.""" self.default_label_stores.append(provider_class) def register_default_evaluator(self, provider_class): """Registers an EvaluatorDefaultProvider for use as a plugin.""" self.default_evaluators.append(provider_class) def register_experiment_runner(self, runner_key, runner_class): """Registers an ExperimentRunner as a plugin. Args: runner_key - The key used to reference this plugin runner. This is a string that will match the command line argument used to reference this runner; e.g. if the key is "FOO_RUNNER", then users can use the runner by issuing a "rastervision run foo_runner ..." command. runner_class - The class of the ExperimentRunner plugin. """ if runner_key in self.experiment_runners: raise PluginError('ExperimentRunner already registered for ' 'key {}'.format(runner_key)) self.experiment_runners[runner_key] = runner_class def register_filesystem(self, filesystem_class): """Registers a FileSystem as a plugin.""" self.filesystems.append(filesystem_class)
[((37, 15, 37, 50), 'rastervision._registry._get_plugin_registry', 'rv._registry._get_plugin_registry', ({}, {}), '()', True, 'import rastervision as rv\n'), ((49, 31, 49, 63), 'os.path.join', 'os.path.join', ({(49, 44, 49, 51): 'rv_home', (49, 53, 49, 62): '"""plugins"""'}, {}), "(rv_home, 'plugins')", False, 'import os\n'), ((91, 22, 91, 64), 'pluginbase.PluginBase', 'PluginBase', (), '', False, 'from pluginbase import PluginBase\n'), ((132, 15, 133, 78), 'rastervision.protos.plugin_pb2.PluginConfig', 'PluginConfigMsg', (), '', True, 'from rastervision.protos.plugin_pb2 import PluginConfig as PluginConfigMsg\n'), ((94, 26, 94, 73), 'os.path.join', 'os.path.join', ({(94, 39, 94, 59): 'self.plugin_root_dir', (94, 61, 94, 72): 'plugin_name'}, {}), '(self.plugin_root_dir, plugin_name)', False, 'import os\n'), ((95, 17, 95, 72), 'rastervision._registry.get_file_system', 'rv._registry.get_file_system', (), '', True, 'import rastervision as rv\n'), ((96, 25, 96, 68), 'rastervision.utils.files.download_if_needed', 'download_if_needed', (), '', False, 'from rastervision.utils.files import download_if_needed\n'), ((97, 24, 97, 51), 'os.path.dirname', 'os.path.dirname', ({(97, 40, 97, 50): 'local_path'}, {}), '(local_path)', False, 'import os\n'), ((113, 21, 113, 52), 'importlib.import_module', 'importlib.import_module', ({(113, 45, 113, 51): 'module'}, {}), '(module)', False, 'import importlib\n'), ((93, 43, 93, 64), 'os.path.basename', 'os.path.basename', ({(93, 60, 93, 63): 'uri'}, {}), '(uri)', False, 'import os\n')]
eldar/acsm
acsm/nnutils/resunet.py
04069e8bb4c12185473dc10c3355e5367fa98968
from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import app from absl import flags import os import os.path as osp import numpy as np import torch import torchvision import torch.nn as nn from torch.autograd import Variable import functools from . import net_blocks as nb import pdb class ResNetConcatGenerator(nn.Module): def __init__(self, input_nc, output_nc, n_blocks=3, ngf=64,): super(ResNetConcatGenerator, self).__init__() self.encoder = ResnetEncoder(n_blocks=n_blocks) self.n_blocks = n_blocks decoder = [] if n_blocks == 3: inner_nc = 256 nlayers = 4 elif n_blocks == 4: inner_nc = 512 nlayers = 5 for lx in range(nlayers): outnc = max(inner_nc // 2, 16) up = nb.upconv2d(inner_nc, outnc) decoder.append(up) inner_nc = outnc up = nn.Conv2d( inner_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=True) decoder.append(up) self.decoder = nn.Sequential(*decoder) nb.net_init(self.decoder) return def forward(self, input): img_enc = self.encoder(input) img_dec = self.decoder(img_enc) return img_dec def reinit_weights(self, ): self.encoder = ResnetEncoder(n_blocks=self.n_blocks) nb.net_init(self.decoder) class ResnetEncoder(nn.Module): def __init__(self, n_blocks): super(ResnetEncoder, self).__init__() self.resnet = torchvision.models.resnet18(pretrained=True) self.n_blocks = n_blocks def forward(self, x): n_blocks = self.n_blocks x = self.resnet.conv1(x) x = self.resnet.bn1(x) x = self.resnet.relu(x) x = self.resnet.maxpool(x) if n_blocks >= 1: x = self.resnet.layer1(x) if n_blocks >= 2: x = self.resnet.layer2(x) if n_blocks >= 3: x = self.resnet.layer3(x) if n_blocks >= 4: x = self.resnet.layer4(x) return x
[((39, 13, 40, 79), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((42, 23, 42, 46), 'torch.nn.Sequential', 'nn.Sequential', ({(42, 37, 42, 45): '*decoder'}, {}), '(*decoder)', True, 'import torch.nn as nn\n'), ((59, 22, 59, 66), 'torchvision.models.resnet18', 'torchvision.models.resnet18', (), '', False, 'import torchvision\n')]
marinang/uproot-methods
uproot_methods/common/TVector.py
1d16d51ab7da19b4f31070d24e8fbfed3ae3ec8f
#!/usr/bin/env python # Copyright (c) 2018, DIANA-HEP # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import math import numbers import operator import awkward import awkward.util class Common(object): @property def mag2(self): return self.dot(self) @property def mag(self): return awkward.util.numpy.sqrt(self.mag2) @property def rho2(self): out = self.x*self.x out = out + self.y*self.y return out def delta_phi(self, other): return (self.phi - other.phi + math.pi) % (2*math.pi) - math.pi def isparallel(self, other, tolerance=1e-10): return 1 - self.cosdelta(other) < tolerance def isantiparallel(self, other, tolerance=1e-10): return self.cosdelta(other) - (-1) < tolerance def iscollinear(self, other, tolerance=1e-10): return 1 - awkward.util.numpy.absolute(self.cosdelta(other)) < tolerance def __lt__(self, other): raise TypeError("spatial vectors have no natural ordering") def __gt__(self, other): raise TypeError("spatial vectors have no natural ordering") def __le__(self, other): raise TypeError("spatial vectors have no natural ordering") def __ge__(self, other): raise TypeError("spatial vectors have no natural ordering") class ArrayMethods(Common): @property def unit(self): return self / self.mag @property def rho(self): out = self.rho2 return awkward.util.numpy.sqrt(out) @property def phi(self): return awkward.util.numpy.arctan2(self.y, self.x) def cosdelta(self, other): denom = self.mag2 * other.mag2 mask = (denom > 0) denom = denom[mask] denom[:] = awkward.util.numpy.sqrt(denom) out = self.dot(other) out[mask] /= denom mask = awkward.util.numpy.logical_not(mask) out[mask] = 1 return awkward.util.numpy.clip(out, -1, 1) def angle(self, other, normal=None, degrees=False): out = awkward.util.numpy.arccos(self.cosdelta(other)) if normal is not None: a = self.unit b = other.unit out = out * awkward.util.numpy.sign(normal.dot(a.cross(b))) if degrees: out = awkward.util.numpy.multiply(out, 180.0/awkward.util.numpy.pi) return out def isopposite(self, other, tolerance=1e-10): tmp = self + other tmp.x = awkward.util.numpy.absolute(tmp.x) tmp.y = awkward.util.numpy.absolute(tmp.y) 
tmp.z = awkward.util.numpy.absolute(tmp.z) out = (tmp.x < tolerance) out = awkward.util.numpy.bitwise_and(out, tmp.y < tolerance) out = awkward.util.numpy.bitwise_and(out, tmp.z < tolerance) return out def isperpendicular(self, other, tolerance=1e-10): tmp = self.dot(other) tmp.x = awkward.util.numpy.absolute(tmp.x) tmp.y = awkward.util.numpy.absolute(tmp.y) tmp.z = awkward.util.numpy.absolute(tmp.z) out = (tmp.x < tolerance) out = awkward.util.numpy.bitwise_and(out, tmp.y < tolerance) out = awkward.util.numpy.bitwise_and(out, tmp.z < tolerance) return out class Methods(Common): @property def unit(self): return self / self.mag @property def rho(self): return math.sqrt(self.rho2) @property def phi(self): return math.atan2(self.y, self.x) def cosdelta(self, other): m1 = self.mag2 m2 = other.mag2 if m1 == 0 or m2 == 0: return 1.0 r = self.dot(other) / math.sqrt(m1 * m2) return max(-1.0, min(1.0, r)) def angle(self, other, degrees=False): out = math.acos(self.cosdelta(other)) if degrees: out = out * 180.0/math.pi return out def isopposite(self, other, tolerance=1e-10): tmp = self + other return abs(tmp.x) < tolerance and abs(tmp.y) < tolerance and abs(tmp.z) < tolerance def isperpendicular(self, other, tolerance=1e-10): tmp = self.dot(other) return abs(tmp.x) < tolerance and abs(tmp.y) < tolerance and abs(tmp.z) < tolerance def __add__(self, other): return self._vector(operator.add, other) def __radd__(self, other): return self._vector(operator.add, other, True) def __sub__(self, other): return self._vector(operator.sub, other) def __rsub__(self, other): return self._vector(operator.sub, other, True) def __mul__(self, other): return self._scalar(operator.mul, other) def __rmul__(self, other): return self._scalar(operator.mul, other, True) def __div__(self, other): return self._scalar(operator.div, other) def __rdiv__(self, other): return self._scalar(operator.div, other, True) def __truediv__(self, other): return self._scalar(operator.truediv, other) def __rtruediv__(self, other): return self._scalar(operator.truediv, other, True) def __floordiv__(self, other): return self._scalar(operator.floordiv, other) def __rfloordiv__(self, other): return self._scalar(operator.floordiv, other, True) def __mod__(self, other): return self._scalar(operator.mod, other) def __rmod__(self, other): return self._scalar(operator.mod, other, True) def __divmod__(self, other): return self._scalar(operator.divmod, other) def __rdivmod__(self, other): return self._scalar(operator.divmod, other, True) def __pow__(self, other): if isinstance(other, (numbers.Number, awkward.util.numpy.number)): if other == 2: return self.mag2 else: return self.mag2**(0.5*other) else: self._scalar(operator.pow, other) # no __rpow__ def __lshift__(self, other): return self._scalar(operator.lshift, other) def __rlshift__(self, other): return self._scalar(operator.lshift, other, True) def __rshift__(self, other): return self._scalar(operator.rshift, other) def __rrshift__(self, other): return self._scalar(operator.rshift, other, True) def __and__(self, other): return self._scalar(operator.and_, other) def __rand__(self, other): return self._scalar(operator.and_, other, True) def __or__(self, other): return self._scalar(operator.or_, other) def __ror__(self, other): return self._scalar(operator.or_, other, True) def __xor__(self, other): return self._scalar(operator.xor, other) def __rxor__(self, other): return self._scalar(operator.xor, other, True) def __neg__(self): return self._unary(operator.neg) def __pos__(self): return 
self._unary(operator.pos) def __abs__(self): return self.mag def __invert__(self): return self._unary(operator.invert)
[((45, 15, 45, 49), 'awkward.util.numpy.sqrt', 'awkward.util.numpy.sqrt', ({(45, 39, 45, 48): 'self.mag2'}, {}), '(self.mag2)', False, 'import awkward\n'), ((85, 15, 85, 43), 'awkward.util.numpy.sqrt', 'awkward.util.numpy.sqrt', ({(85, 39, 85, 42): 'out'}, {}), '(out)', False, 'import awkward\n'), ((89, 15, 89, 57), 'awkward.util.numpy.arctan2', 'awkward.util.numpy.arctan2', ({(89, 42, 89, 48): 'self.y', (89, 50, 89, 56): 'self.x'}, {}), '(self.y, self.x)', False, 'import awkward\n'), ((95, 19, 95, 49), 'awkward.util.numpy.sqrt', 'awkward.util.numpy.sqrt', ({(95, 43, 95, 48): 'denom'}, {}), '(denom)', False, 'import awkward\n'), ((100, 15, 100, 51), 'awkward.util.numpy.logical_not', 'awkward.util.numpy.logical_not', ({(100, 46, 100, 50): 'mask'}, {}), '(mask)', False, 'import awkward\n'), ((103, 15, 103, 50), 'awkward.util.numpy.clip', 'awkward.util.numpy.clip', ({(103, 39, 103, 42): 'out', (103, 44, 103, 46): '(-1)', (103, 48, 103, 49): '(1)'}, {}), '(out, -1, 1)', False, 'import awkward\n'), ((117, 16, 117, 50), 'awkward.util.numpy.absolute', 'awkward.util.numpy.absolute', ({(117, 44, 117, 49): 'tmp.x'}, {}), '(tmp.x)', False, 'import awkward\n'), ((118, 16, 118, 50), 'awkward.util.numpy.absolute', 'awkward.util.numpy.absolute', ({(118, 44, 118, 49): 'tmp.y'}, {}), '(tmp.y)', False, 'import awkward\n'), ((119, 16, 119, 50), 'awkward.util.numpy.absolute', 'awkward.util.numpy.absolute', ({(119, 44, 119, 49): 'tmp.z'}, {}), '(tmp.z)', False, 'import awkward\n'), ((122, 14, 122, 68), 'awkward.util.numpy.bitwise_and', 'awkward.util.numpy.bitwise_and', ({(122, 45, 122, 48): 'out', (122, 50, 122, 67): 'tmp.y < tolerance'}, {}), '(out, tmp.y < tolerance)', False, 'import awkward\n'), ((123, 14, 123, 68), 'awkward.util.numpy.bitwise_and', 'awkward.util.numpy.bitwise_and', ({(123, 45, 123, 48): 'out', (123, 50, 123, 67): 'tmp.z < tolerance'}, {}), '(out, tmp.z < tolerance)', False, 'import awkward\n'), ((128, 16, 128, 50), 'awkward.util.numpy.absolute', 'awkward.util.numpy.absolute', ({(128, 44, 128, 49): 'tmp.x'}, {}), '(tmp.x)', False, 'import awkward\n'), ((129, 16, 129, 50), 'awkward.util.numpy.absolute', 'awkward.util.numpy.absolute', ({(129, 44, 129, 49): 'tmp.y'}, {}), '(tmp.y)', False, 'import awkward\n'), ((130, 16, 130, 50), 'awkward.util.numpy.absolute', 'awkward.util.numpy.absolute', ({(130, 44, 130, 49): 'tmp.z'}, {}), '(tmp.z)', False, 'import awkward\n'), ((133, 14, 133, 68), 'awkward.util.numpy.bitwise_and', 'awkward.util.numpy.bitwise_and', ({(133, 45, 133, 48): 'out', (133, 50, 133, 67): 'tmp.y < tolerance'}, {}), '(out, tmp.y < tolerance)', False, 'import awkward\n'), ((134, 14, 134, 68), 'awkward.util.numpy.bitwise_and', 'awkward.util.numpy.bitwise_and', ({(134, 45, 134, 48): 'out', (134, 50, 134, 67): 'tmp.z < tolerance'}, {}), '(out, tmp.z < tolerance)', False, 'import awkward\n'), ((144, 15, 144, 35), 'math.sqrt', 'math.sqrt', ({(144, 25, 144, 34): 'self.rho2'}, {}), '(self.rho2)', False, 'import math\n'), ((148, 15, 148, 41), 'math.atan2', 'math.atan2', ({(148, 26, 148, 32): 'self.y', (148, 34, 148, 40): 'self.x'}, {}), '(self.y, self.x)', False, 'import math\n'), ((112, 18, 112, 79), 'awkward.util.numpy.multiply', 'awkward.util.numpy.multiply', ({(112, 46, 112, 49): 'out', (112, 51, 112, 78): '180.0 / awkward.util.numpy.pi'}, {}), '(out, 180.0 / awkward.util.numpy.pi)', False, 'import awkward\n'), ((155, 30, 155, 48), 'math.sqrt', 'math.sqrt', ({(155, 40, 155, 47): '(m1 * m2)'}, {}), '(m1 * m2)', False, 'import math\n')]
pennyarcade/TPPP
tpp/controller/ConversionController.py
9bb6db774d77f74c54ed2fa004e97c1aa114fff9
""" Implements a non interactive controller to controt non-interactive visualizers. (i.e. those that are used for converting TPP souce code into another format) """ from tpp.FileParser import FileParser from tpp.controller.TPPController import TPPController class ConversionController(TPPController): """ Implements a non interactive controller to run non-interactive visualizers. (i.e. those that are used for converting TPP source code into another format) """ def __init__(self, input_file, output, visualizer_class): """ Todo: ApiDoc. :rtype: :param input: :param output: :param visualizer_class: """ super(ConversionController, self).__init__() parser = FileParser(input_file) self.pages = parser.get_pages() self.vis = visualizer_class(output) def run(self): """ Todo: ApiDoc. :return: """ for page in self.pages: while True: eop = page.is_eop() self.vis.visualize(page.next_line(), eop) if eop: break def close(self): """ Todo: ApiDoc. :return: """ self.vis.close()
[((27, 17, 27, 39), 'tpp.FileParser.FileParser', 'FileParser', ({(27, 28, 27, 38): 'input_file'}, {}), '(input_file)', False, 'from tpp.FileParser import FileParser\n')]
ZachGeo/covidGR_API
scrapers/covid_scraper.py
2f316337dda65bd33ac895df336481c3c2abe2c6
from bs4 import BeautifulSoup from datetime import date from lxml import html import requests import re import json class CovidScraper: def __init__(self): self.api_url = 'http://127.0.0.1:5000/covidgr' self.api_sum_url = 'http://127.0.0.1:5000/summary/covidgr' self.api_test_url = 'http://127.0.0.1:5000/covidgr/tests' self.scrape_url = 'https://www.worldometers.info/coronavirus/country/greece/' self.scrape_tests_url = 'https://github.com/owid/covid-19-data/blob/master/public/data/testing/covid-testing-latest-data-source-details.csv' self.today = '' self.covid_data = [] self.summary_data= [] def scrape_data(self): data = [] self.today = str(date.today()) soup = self.scrape_page_content() soup_test_page = self.scrape_page_content_contains_tests() if soup: self.get_daily_data(soup) self.get_summary_data(soup) if self.summary_data and self.covid_data: post_daily_and_sum_covid_data = self.call_api_put_data( self.today, self.covid_data, self.summary_data) data.append(post_daily_and_sum_covid_data) if soup_test_page: tests_data = self.get_tests_per_day(soup_test_page) if tests_data[0]: post_daily_tests_covid_data = self.call_api_post_tested_covid_data( tests_data[0], tests_data[1]) data.append(post_daily_tests_covid_data) return data def scrape_page_content(self): page = requests.get(self.scrape_url) soup = BeautifulSoup(page.content, 'html.parser') return soup def scrape_page_content_contains_tests(self): page = requests.get(self.scrape_tests_url) soup = BeautifulSoup(page.content, 'html.parser') return soup def get_daily_data(self, soup): covid_data = [] daily_covidgr_html_content = soup.find('li', class_='news_li') get_daily_covidgr_text = daily_covidgr_html_content.text for elem in get_daily_covidgr_text.split(): regex = '\d*(.|)\d+' match = re.findall(regex, elem) if match: covid_data.append(elem) self.covid_data = covid_data def get_summary_data(self, soup): summary_data = [] all_cases_covidgr_html_content = soup.find_all( 'div', class_='maincounter-number') for item in range(len(all_cases_covidgr_html_content)): regex = r'(\n)|\s' all_cases_data = re.sub( regex, '', all_cases_covidgr_html_content[item].text) summary_data.append(all_cases_data) self.summary_data = summary_data def get_tests_per_day(self, tree): html_content = tree.find('tr', id='LC34').find_all('td') country_code = html_content[1] date_test = html_content[3].text if country_code.text == 'GRC': today_tests = html_content[10].text total_tests = html_content[8].text return [date_test, today_tests] def call_api_post_tested_covid_data(self, today, tests): headers = { 'Content-type': 'application/json', } data = json.dumps({"date": today, "daily_test": tests}) response_tests = requests.post( self.api_test_url, headers=headers, data=data) return response_tests.json() def call_api_put_data(self, today, covid_data, summary_data): headers = { 'Content-type': 'application/json', } data = json.dumps( {"date": today, "cases": covid_data[0], "deaths": covid_data[1]}) sum_data = json.dumps( {"sum_cases": summary_data[0], "sum_deaths": summary_data[1], "sum_recovered": summary_data[2]}) response = requests.post(self.api_url, headers=headers, data=data) response_sum = requests.put( self.api_sum_url, headers=headers, data=sum_data) return [response.json(), response_sum.json()] if __name__ == '__main__': cs = CovidScraper() results = cs.scrape_data() print(results)
[((47, 15, 47, 44), 'requests.get', 'requests.get', ({(47, 28, 47, 43): 'self.scrape_url'}, {}), '(self.scrape_url)', False, 'import requests\n'), ((48, 15, 48, 57), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(48, 29, 48, 41): 'page.content', (48, 43, 48, 56): '"""html.parser"""'}, {}), "(page.content, 'html.parser')", False, 'from bs4 import BeautifulSoup\n'), ((53, 15, 53, 50), 'requests.get', 'requests.get', ({(53, 28, 53, 49): 'self.scrape_tests_url'}, {}), '(self.scrape_tests_url)', False, 'import requests\n'), ((54, 15, 54, 57), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(54, 29, 54, 41): 'page.content', (54, 43, 54, 56): '"""html.parser"""'}, {}), "(page.content, 'html.parser')", False, 'from bs4 import BeautifulSoup\n'), ((103, 15, 103, 63), 'json.dumps', 'json.dumps', ({(103, 26, 103, 62): "{'date': today, 'daily_test': tests}"}, {}), "({'date': today, 'daily_test': tests})", False, 'import json\n'), ((105, 25, 106, 58), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((115, 15, 116, 77), 'json.dumps', 'json.dumps', ({(116, 12, 116, 76): "{'date': today, 'cases': covid_data[0], 'deaths': covid_data[1]}"}, {}), "({'date': today, 'cases': covid_data[0], 'deaths': covid_data[1]})", False, 'import json\n'), ((118, 19, 119, 108), 'json.dumps', 'json.dumps', ({(119, 12, 119, 107): "{'sum_cases': summary_data[0], 'sum_deaths': summary_data[1],\n 'sum_recovered': summary_data[2]}"}, {}), "({'sum_cases': summary_data[0], 'sum_deaths': summary_data[1],\n 'sum_recovered': summary_data[2]})", False, 'import json\n'), ((121, 19, 121, 74), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((123, 23, 124, 61), 'requests.put', 'requests.put', (), '', False, 'import requests\n'), ((22, 25, 22, 37), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date\n'), ((66, 20, 66, 43), 're.findall', 're.findall', ({(66, 31, 66, 36): 'regex', (66, 38, 66, 42): 'elem'}, {}), '(regex, elem)', False, 'import re\n'), ((80, 29, 81, 69), 're.sub', 're.sub', ({(81, 16, 81, 21): 'regex', (81, 23, 81, 25): '""""""', (81, 27, 81, 68): 'all_cases_covidgr_html_content[item].text'}, {}), "(regex, '', all_cases_covidgr_html_content[item].text)", False, 'import re\n')]
schorsche/css3-imageslider
img/autoeditimg.py
6d15b2e77f141b8e871bdce2049ee7b2567981fe
#!/usr/bin/python2.7 import os from PIL import Image DATEI_WEB_GROSSE = 700 def isimg(isitimg): ext = os.path.splitext(isitimg)[1].lower() if ext == ".jpg" or ext == ".png" or ext == ".gif": return True return False def bearbeiten(datei): img = Image.open(datei) wrel = DATEI_WEB_GROSSE / float(img.size[0]) habs = int( float(img.size[1]) * float(wrel) ) splt = os.path.splitext(datei) newfilename = splt[0] + splt[1].lower() img = img.resize((DATEI_WEB_GROSSE, habs), Image.ANTIALIAS) img.save(newfilename, quality=100, optimize=True, progressive=True) if newfilename != datei: os.rename(newfilename, datei) def main(): files = os.listdir('.') files = filter(isimg, files) for f in files: print f bearbeiten(f) if __name__ == '__main__': main()
[]
brisberg/Kiri-Cogs
wow/wow.py
9a5307ff8fbaa5e0560ec518cf26df52347da98d
import discord from discord.ext import commands class WowCog: """Custom Cog that has commands for WoW Memes""" def __init__(self, bot): self.bot = bot async def _play(self, url, ctx): """Helper for aliasing Play in the Audio module""" audio = self.bot.get_cog('Audio') if not audio: await self.bot.say("Audio module required. Load with: {}load audio".format(ctx.prefix)) return await ctx.invoke(audio.play, url_or_search_terms=url) @commands.command(pass_context=True, no_pm=True) async def flamewreath(self, ctx): """I will not move when Flame Wreath is cast!""" await self._play("https://www.youtube.com/watch?v=gcA6y7sxKcA", ctx) def setup(bot): bot.add_cog(WowCog(bot))
[((19, 5, 19, 52), 'discord.ext.commands.command', 'commands.command', (), '', False, 'from discord.ext import commands\n')]
henrik997/privacy-evaluator
tests/attacks/class_test.py
f1d0e6c10ff58e582a44243788ab66c1d453bfa0
import pytest from privacy_evaluator.attacks.sample_attack import Sample_Attack """ This test only checks that no error is thrown when calling the function; it can be removed in the future """ def test_sample_attack(): test = Sample_Attack(0, 0, 0) test.perform_attack()
[((9, 11, 9, 33), 'privacy_evaluator.attacks.sample_attack.Sample_Attack', 'Sample_Attack', ({(9, 25, 9, 26): '0', (9, 28, 9, 29): '0', (9, 31, 9, 32): '0'}, {}), '(0, 0, 0)', False, 'from privacy_evaluator.attacks.sample_attack import Sample_Attack\n')]
Oli2/presto-python-client
setup.py
11a89c2528a35d5af6916e9c9175cb3e1f84160b
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import re from setuptools import setup import textwrap _version_re = re.compile(r'__version__\s+=\s+(.*)') with open('prestodb/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup( name='presto-python-client', author='Presto Team', author_email='[email protected]', version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'': ['LICENSE', 'README.md']}, description='Client for the Presto distributed SQL Engine', long_description=textwrap.dedent(""" Client for Presto (https://prestodb.io), a distributed SQL engine for interactive and batch big data processing. Provides a low-level client and a DBAPI 2.0 implementation. """), license='Apache 2.0', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX', 'Operating System :: Microsoft :: Windows', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Database :: Front-Ends', ], install_requires=[ 'click', 'future', 'ipaddress', 'requests', 'requests_kerberos', 'six', 'typing', ], extras_require={'tests':[ 'httpretty', 'pytest', 'pytest-runner', ]} )
[((19, 14, 19, 51), 're.compile', 're.compile', ({(19, 25, 19, 50): '"""__version__\\\\s+=\\\\s+(.*)"""'}, {}), "('__version__\\\\s+=\\\\s+(.*)')", False, 'import re\n'), ((36, 21, 40, 8), 'textwrap.dedent', 'textwrap.dedent', ({(36, 37, 40, 7): '"""\n Client for Presto (https://prestodb.io), a distributed SQL engine for\n interactive and batch big data processing. Provides a low-level client and\n a DBAPI 2.0 implementation.\n """'}, {}), '(\n """\n Client for Presto (https://prestodb.io), a distributed SQL engine for\n interactive and batch big data processing. Provides a low-level client and\n a DBAPI 2.0 implementation.\n """\n )', False, 'import textwrap\n')]
TausifAnsari/PyHub
Graphs/Pie Chart.py
f6c949dc6a3974f57d7d146708443d0ceeb4418f
import matplotlib.pyplot as graph subject = ["Probability", "Calculus", "Discrete Mathematics", "Adv Engineering Mathematics", "Linear Algebra", "Cryptography"] weightage = [250,900,850,1200,290,345] seperator = [0.05,0,0,0,0.05,0.05] graph.title("Mathematics Topic Weightage") graph.pie(weightage,labels=subject,autopct="%0.1f%%", explode=seperator) graph.show()
[((10, 0, 10, 42), 'matplotlib.pyplot.title', 'graph.title', ({(10, 12, 10, 41): '"""Mathematics Topic Weightage"""'}, {}), "('Mathematics Topic Weightage')", True, 'import matplotlib.pyplot as graph\n'), ((11, 0, 11, 72), 'matplotlib.pyplot.pie', 'graph.pie', (), '', True, 'import matplotlib.pyplot as graph\n'), ((12, 0, 12, 12), 'matplotlib.pyplot.show', 'graph.show', ({}, {}), '()', True, 'import matplotlib.pyplot as graph\n')]
noavilk/IML.HUJI
exercises/perform_model_selection.py
35aa4e6fbe489239e4fe72bf38c0dba3e6c81f37
from __future__ import annotations import numpy as np import pandas as pd from sklearn import datasets from IMLearn.metrics import mean_square_error from IMLearn.utils import split_train_test from IMLearn.model_selection import cross_validate from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression from sklearn.linear_model import Lasso from utils import * import plotnine as gg def select_polynomial_degree(n_samples: int = 100, noise: float = 5): """ Simulate data from a polynomial model and use cross-validation to select the best fitting degree Parameters ---------- n_samples: int, default=100 Number of samples to generate noise: float, default = 5 Noise level to simulate in responses """ # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise # and split into training- and testing portions def f(x): return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2) X = np.linspace(-1.2, 2, n_samples) y = f(X) + np.random.normal(0, noise, n_samples) train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 / 3)) df_train = pd.DataFrame({"x": train_X.squeeze(), "y": train_y, "type": "Train"}) df_test = pd.DataFrame({"x": test_X.squeeze(), "y": test_y, "type": "test"}) x_stat = np.linspace(-1.4, 2, 100) df_stat = pd.DataFrame({"x": x_stat, "y": f(x_stat), "type": "Model"}) df = pd.concat([df_test, df_train]) title = f"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})" p = gg.ggplot() + \ gg.geom_point(df, gg.aes("x", "y", color="type")) + \ gg.geom_line(df_stat, gg.aes("x", "y")) + \ gg.theme_bw() + \ gg.ggtitle(title) # print(p) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10 train_err = [] validation_err = [] for k in range(11): pf = PolynomialFitting(k) train_score, validation_score = cross_validate(pf, train_X.to_numpy(), train_y.to_numpy(), mean_square_error) train_err.append(train_score) validation_err.append(validation_score) df1 = pd.DataFrame({"k": range(11), "avg error": train_err, "type": "train error"}) df2 = pd.DataFrame({"k": range(11), "avg error": validation_err, "type": "validation error"}) df = pd.concat([df1, df2]) title = f" Cross Validation for Polynomial Fitting Over Different Degrees k" p = gg.ggplot(df, gg.aes("k", "avg error", color="type")) + \ gg.geom_point() + \ gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \ gg.labs(y="Average training and validation errors", title=f"{title} \nWith Noise: {noise}, Num of samples: {n_samples}") gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False) # Question 3 - Using best value of k, fit a k-degree polynomial model and report test error best_k = np.argmin(np.array(validation_err)) pf = PolynomialFitting(int(best_k)) pf.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = pf.predict(test_X.to_numpy()) print("best k =", best_k) print("Test = ", round(mean_square_error(test_y.to_numpy(), y_pred), 2)) print("Validation = ", round(validation_err[best_k], 2)) def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500): """ Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter values for Ridge and Lasso regressions Parameters ---------- n_samples: int, default=50 Number of samples to generate n_evaluations: int, default = 500 Number of regularization parameter values to evaluate 
for each of the algorithms """ # Question 6 - Load diabetes dataset and split into training and testing portions X, y = datasets.load_diabetes(return_X_y=True, as_frame=True) train_X, train_y, test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ], y[50:] # Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions for name, learner, ran in [("Ridge", RidgeRegression, np.linspace(0.001, 0.05, 500)), ("Lasso", Lasso, np.linspace(0.001, 0.5, 500))]: train_err = [] validation_err = [] for lam in ran: rg = learner(lam) train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(), mean_square_error) train_err.append(train_score) validation_err.append(validation_score) df1 = pd.DataFrame({"lambda": ran, "avg error": train_err, "type": "train error"}) df2 = pd.DataFrame({"lambda": ran, "avg error": validation_err, "type": "validation error"}) df = pd.concat([df1, df2]) title = f"{name} Regularization Cross Validate Over Different Lambda" p = gg.ggplot(df, gg.aes("lambda", "avg error", color="type")) + \ gg.geom_line() + \ gg.theme_bw() + gg.labs(y="Average training and validation errors", title=title) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model best_lam = np.argmin(np.array(validation_err)) rg = learner(ran[best_lam]) rg.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = rg.predict(test_X.to_numpy()) print(f"best lambda {name} = {round(ran[best_lam], 3)}") print(f"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}") lr = LinearRegression() lr.fit(train_X.to_numpy(), train_y.to_numpy()) print("Linear Regression Loss = ", lr.loss(test_X.to_numpy(), test_y.to_numpy())) if __name__ == '__main__': np.random.seed(0) select_polynomial_degree() select_polynomial_degree(noise=0) select_polynomial_degree(n_samples=1500, noise=10) select_regularization_parameter()
[((34, 8, 34, 39), 'numpy.linspace', 'np.linspace', ({(34, 20, 34, 24): '-1.2', (34, 26, 34, 27): '2', (34, 29, 34, 38): 'n_samples'}, {}), '(-1.2, 2, n_samples)', True, 'import numpy as np\n'), ((40, 13, 40, 38), 'numpy.linspace', 'np.linspace', ({(40, 25, 40, 29): '-1.4', (40, 31, 40, 32): '2', (40, 34, 40, 37): '100'}, {}), '(-1.4, 2, 100)', True, 'import numpy as np\n'), ((42, 9, 42, 39), 'pandas.concat', 'pd.concat', ({(42, 19, 42, 38): '[df_test, df_train]'}, {}), '([df_test, df_train])', True, 'import pandas as pd\n'), ((51, 4, 51, 81), 'plotnine.ggsave', 'gg.ggsave', (), '', True, 'import plotnine as gg\n'), ((64, 9, 64, 30), 'pandas.concat', 'pd.concat', ({(64, 19, 64, 29): '[df1, df2]'}, {}), '([df1, df2])', True, 'import pandas as pd\n'), ((71, 4, 71, 101), 'plotnine.ggsave', 'gg.ggsave', (), '', True, 'import plotnine as gg\n'), ((97, 11, 97, 65), 'sklearn.datasets.load_diabetes', 'datasets.load_diabetes', (), '', False, 'from sklearn import datasets\n'), ((127, 9, 127, 27), 'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ({}, {}), '()', False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\n'), ((133, 4, 133, 21), 'numpy.random.seed', 'np.random.seed', ({(133, 19, 133, 20): '(0)'}, {}), '(0)', True, 'import numpy as np\n'), ((35, 15, 35, 52), 'numpy.random.normal', 'np.random.normal', ({(35, 32, 35, 33): '(0)', (35, 35, 35, 40): 'noise', (35, 42, 35, 51): 'n_samples'}, {}), '(0, noise, n_samples)', True, 'import numpy as np\n'), ((36, 56, 36, 71), 'pandas.DataFrame', 'pd.DataFrame', ({(36, 69, 36, 70): 'X'}, {}), '(X)', True, 'import pandas as pd\n'), ((36, 73, 36, 85), 'pandas.Series', 'pd.Series', ({(36, 83, 36, 84): 'y'}, {}), '(y)', True, 'import pandas as pd\n'), ((49, 8, 49, 25), 'plotnine.ggtitle', 'gg.ggtitle', ({(49, 19, 49, 24): 'title'}, {}), '(title)', True, 'import plotnine as gg\n'), ((57, 13, 57, 33), 'IMLearn.learners.regressors.PolynomialFitting', 'PolynomialFitting', ({(57, 31, 57, 32): 'k'}, {}), '(k)', False, 'from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression\n'), ((69, 8, 70, 84), 'plotnine.labs', 'gg.labs', (), '', True, 'import plotnine as gg\n'), ((74, 23, 74, 47), 'numpy.array', 'np.array', ({(74, 32, 74, 46): 'validation_err'}, {}), '(validation_err)', True, 'import numpy as np\n'), ((111, 14, 111, 90), 'pandas.DataFrame', 'pd.DataFrame', ({(111, 27, 111, 89): "{'lambda': ran, 'avg error': train_err, 'type': 'train error'}"}, {}), "({'lambda': ran, 'avg error': train_err, 'type': 'train error'})", True, 'import pandas as pd\n'), ((112, 14, 112, 100), 'pandas.DataFrame', 'pd.DataFrame', ({(112, 27, 112, 99): "{'lambda': ran, 'avg error': validation_err, 'type': 'validation error'}"}, {}), "({'lambda': ran, 'avg error': validation_err, 'type':\n 'validation error'})", True, 'import pandas as pd\n'), ((113, 13, 113, 34), 'pandas.concat', 'pd.concat', ({(113, 23, 113, 33): '[df1, df2]'}, {}), '([df1, df2])', True, 'import pandas as pd\n'), ((118, 8, 118, 85), 'plotnine.ggsave', 'gg.ggsave', (), '', True, 'import plotnine as gg\n'), ((48, 8, 48, 21), 'plotnine.theme_bw', 'gg.theme_bw', ({}, {}), '()', True, 'import plotnine as gg\n'), ((101, 58, 101, 87), 'numpy.linspace', 'np.linspace', ({(101, 70, 101, 75): '(0.001)', (101, 77, 101, 81): '(0.05)', (101, 83, 101, 86): '(500)'}, {}), '(0.001, 0.05, 500)', True, 'import numpy as np\n'), ((102, 48, 102, 76), 'numpy.linspace', 'np.linspace', ({(102, 60, 102, 65): '(0.001)', (102, 67, 102, 70): 
'(0.5)', (102, 72, 102, 75): '(500)'}, {}), '(0.001, 0.5, 500)', True, 'import numpy as np\n'), ((117, 28, 117, 92), 'plotnine.labs', 'gg.labs', (), '', True, 'import plotnine as gg\n'), ((121, 29, 121, 53), 'numpy.array', 'np.array', ({(121, 38, 121, 52): 'validation_err'}, {}), '(validation_err)', True, 'import numpy as np\n'), ((68, 8, 68, 21), 'plotnine.theme_bw', 'gg.theme_bw', ({}, {}), '()', True, 'import plotnine as gg\n'), ((117, 12, 117, 25), 'plotnine.theme_bw', 'gg.theme_bw', ({}, {}), '()', True, 'import plotnine as gg\n'), ((45, 8, 45, 19), 'plotnine.ggplot', 'gg.ggplot', ({}, {}), '()', True, 'import plotnine as gg\n'), ((47, 30, 47, 46), 'plotnine.aes', 'gg.aes', ({(47, 37, 47, 40): '"""x"""', (47, 42, 47, 45): '"""y"""'}, {}), "('x', 'y')", True, 'import plotnine as gg\n'), ((67, 8, 67, 23), 'plotnine.geom_point', 'gg.geom_point', ({}, {}), '()', True, 'import plotnine as gg\n'), ((116, 12, 116, 26), 'plotnine.geom_line', 'gg.geom_line', ({}, {}), '()', True, 'import plotnine as gg\n'), ((46, 26, 46, 56), 'plotnine.aes', 'gg.aes', (), '', True, 'import plotnine as gg\n'), ((66, 22, 66, 60), 'plotnine.aes', 'gg.aes', (), '', True, 'import plotnine as gg\n'), ((115, 26, 115, 69), 'plotnine.aes', 'gg.aes', (), '', True, 'import plotnine as gg\n')]
unfoldingWord-dev/d43-catalog
libraries/tools/media_utils.py
6c36f59b9b326e0ead45739c09631ef1e57c4932
import re import copy def parse_media(media, content_version, project_chapters): """ Converts a media object into formats usable in the catalog :param media: the media object :type media: dict :param content_version: the current version of the source content :type content_version: string :param project_chapters: a dictionary of project chapters :type project_chapters: dict :return: resource_formats, project_formats a list of resource formats and dictionary of project formats """ resource_formats = [] project_formats = {} if 'resource' in media: resource_formats = _parse_resource(media['resource'], content_version) if 'projects' in media: for project in media['projects']: project_id = project['identifier'] chapters = [] if project_id == 'obs': # TRICKY: obs projects always have 50 chapters # This allows empty projects to still publish media. for x in range(1, 51): # chapters 1..50 chapters.append(str(x).zfill(2)) if project_id in project_chapters: chapters = project_chapters[project_id] project_formats[project_id] = _parse_project(project, content_version, chapters) return resource_formats, project_formats def _parse_resource(resource, content_version): """ Converts a resource media object into formats usable in the catalog :param resource: the media object :type resource: dict :param content_version: the current version of the source content :type content_version: string :return: a list of formats """ source_version = _expand_keys(resource['version'], {'latest': content_version}) formats = [] if 'media' in resource: for media in resource['media']: media_version = _expand_keys(media['version'], {'latest': content_version}) expansion_vars = _make_expansion_variables(media, content_version) if 'quality' in media and len(media['quality']) > 0: # build format for each quality for quality in media['quality']: expansion_vars['quality'] = quality format = _make_format(source_version=source_version, media_version=media_version, quality=quality, media=media, expansion_vars=expansion_vars) formats.append(format) else: # build a single format format = _make_format(source_version=source_version, media_version=media_version, quality=None, media=media, expansion_vars=expansion_vars) formats.append(format) return formats def _make_format(source_version, media_version, quality, media, expansion_vars): format = { 'format': '', 'modified': '', 'size': 0, 'source_version': '{}'.format(source_version), 'version': '{}'.format(media_version), 'contributor': media['contributor'], 'url': _expand_keys(media['url'], expansion_vars), 'signature': '', 'build_rules': [ 'signing.sign_given_url' ] } if quality: format['quality'] = quality return format def _parse_project(project, content_version, chapters_ids): """ Converts a project media object into formats usable in the catalog :param project: the media object :type project: dict :param content_version: the current version of the source content :type content_version: string :param chapters_ids: a list of chapter identifiers in the project :type chapters_ids: list :return: a list of formats """ source_version = _expand_keys(project['version'], {'latest': content_version}) formats = [] if 'media' in project: for media in project['media']: media_version = _expand_keys(media['version'], {'latest': content_version}) expansion_vars = _make_expansion_variables(media, content_version) if 'quality' in media and len(media['quality']) > 0: # build format for each quality for quality in media['quality']: expansion_vars['quality'] = quality format = 
_make_format(source_version=source_version, media_version=media_version, quality=quality, media=media, expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars) if chapters: format['chapters'] = chapters formats.append(format) else: # build single format format = _make_format(source_version=source_version, media_version=media_version, quality=None, media=media, expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars) if chapters: format['chapters'] = chapters formats.append(format) return formats def _prepare_chapter_formats(media, chapters, expansion_vars): """ This is a wrapper around the method `_parse_project_chapter`. Since we routinely conditionally prepare chapters in multiple places this handles it in one place :param media: the media object to inspect :param chapters: a list of chapter ids :param expansion_vars: a dictionary of variables that may be expanded in the chapter url :return: """ if 'chapter_url' in media: chapter_url = _expand_keys(media['chapter_url'], expansion_vars) chapters = _parse_project_chapter(chapter_url, chapters) if chapters: return chapters return None def _parse_project_chapter(chapter_url, chapters): """ Generates chapter formats for use in the catalog :param chapter_url: the url template that will be used in the formats :param chapters: a list of chapter ids :type chapters: list :return: """ # TODO: this requires that we give a well formatted list of chapter ids and check if the Rc is a book # only book RCs can have chapter formats formats = [] for chapter_id in chapters: format = { 'size': 0, 'length': 0, 'modified': '', 'identifier': chapter_id, 'url': _expand_keys(chapter_url, {'chapter': chapter_id}), 'signature': '', 'build_rules': [ 'signing.sign_given_url' ] } formats.append(format) return formats def _make_expansion_variables(media_block, content_version): """ Creates a dictionary of expansion variables for media items. :param self: :param media_block: :param content_version: :return: """ vars = copy.copy(media_block) # strip black listed keys black_list = ['url', 'chapter_url'] for key in black_list: if key in vars: del vars[key] # TRICKY: using `latest` as an expansion variable in urls is not explicitly stated in the spec, # but it's a common misunderstanding so we allow it. vars['latest'] = '{}'.format(content_version) return vars def _expand_keys(target, replacements): """ Replaces all the dict keys found in the string with the dict values. Keys in the string must be delimited by brackets {} :param target: :param replacements: :return: """ if isinstance(target, basestring) or isinstance(target, str): result = target if not isinstance(replacements, dict): raise Exception('Expected dictionary of replacements but received {}'.format(type(replacements))) for key in replacements: if not isinstance(replacements[key], list): result = re.sub(r'{\s*' + key + '\s*}', '{}'.format(replacements[key]), result) return result elif isinstance(target, int): return target else: raise Exception('Invalid replacement target "{}". Expected string but received {}'.format(target, type(target)))
[((192, 11, 192, 33), 'copy.copy', 'copy.copy', ({(192, 21, 192, 32): 'media_block'}, {}), '(media_block)', False, 'import copy\n')]
Brad19940809/django-customflow
django_customflow/mixins.py
502eed512d7c29e8d176c67fa62a7fce0be492d7
# -*- coding:utf-8 -*- # create_time: 2019/8/5 16:02 # __author__ = 'brad' from . import utils from .tasks.base import WaitingTask, BaseTask class WorkflowMixin(object): """Mixin class to make objects workflow aware. """ def get_workflow(self): """Returns the current workflow of the object. """ return utils.get_workflow(self) def remove_workflow(self): """Removes the workflow from the object. After this function has been called the object has no *own* workflow anymore (it might have one via its content type). """ return utils.remove_workflow_from_object(self) def set_workflow(self, workflow): """Sets the passed workflow to the object. This will set the local workflow for the object. If the object has already the given workflow nothing happens. Otherwise the object gets the passed workflow and the state is set to the workflow's initial state. **Parameters:** workflow The workflow which should be set to the object. Can be a Workflow instance or a string with the workflow name. obj The object which gets the passed workflow. """ return utils.set_workflow_for_object(self, workflow) def get_state(self): """Returns the current workflow state of the object. """ return utils.get_state(self) def set_state(self, state): """Sets the workflow state of the object. """ return utils.set_state(self, state) def set_initial_state(self): """Sets the initial state of the current workflow to the object. """ return self.set_state(self.get_workflow().initial_state) def do_transition(self, transition, user): """Processes the passed transition (if allowed). """ return utils.do_transition(self, transition, user) def do_next_state(self): if self.state_is_waiting(): print("state is waiting! please use method .state_end_waiting() when the WaitingTask has finished.") state = self.get_state() transitions = state.transitions.all() # info: this means the state node is the final one (it has no outgoing transitions) if not transitions: print(state.name, "is the end state") return False for transition in transitions: if transition.condition: cond = utils.import_from_string(transition.condition) # todo: currently the loop simply takes the first transition whose condition passes, breaks out, and sets that state if not cond().run(self, transition): continue if transition.task: # todo: whether tasks run sequentially or asynchronously, or have forward dependencies, still needs to be determined and refined task = utils.import_from_string(transition.task)() if not isinstance(task, (BaseTask, WaitingTask)): raise TypeError("This task is not BaseTask or WaitingTask instance") task.run(self, transition) next_state_instance = transition.destination self.set_state(next_state_instance) # info:This is the waiting task setting. 
if transition.task and isinstance(task, WaitingTask): self.state_set_waiting() # info: record the transition in the log self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name) # todo: this is a traversal: whenever the next state does not require manual handling, advance it automatically right here if not next_state_instance.manual: return self.do_next_state() return True def set_log(self, state, source_state=None, transition=None): return utils.set_log(self, state, source_state, transition) def get_log(self): return utils.get_log(self) def workflow_is_finished(self): state = self.get_state() if not state.transitions.all(): return True else: return False def state_is_waiting(self): return utils.get_state_relation(self).waiting def state_end_waiting(self): state_relation = utils.get_state_relation(self) if not state_relation.waiting: print("there is no need to set") return None state_relation.waiting = False state_relation.save() def state_set_waiting(self): state_relation = utils.get_state_relation(self) if state_relation.waiting: print("there is no need to set") return None state_relation.waiting = True state_relation.save()
[]
fossabot/django-video-encoding
video_encoding/fields.py
16a88c2d61d28e6f5ec2b49956ce356f8c458c67
from django.db.models.fields.files import (FieldFile, ImageField, ImageFileDescriptor) from django.utils.translation import ugettext as _ from .backends import get_backend_class from .files import VideoFile class VideoFileDescriptor(ImageFileDescriptor): pass class VideoFieldFile(VideoFile, FieldFile): def delete(self, save=True): # Clear the video info cache if hasattr(self, '_info_cache'): del self._info_cache super(VideoFieldFile, self).delete(save=save) class VideoField(ImageField): attr_class = VideoFieldFile descriptor_class = VideoFileDescriptor description = _("Video") def __init__(self, verbose_name=None, name=None, duration_field=None, **kwargs): self.duration_field = duration_field super(VideoField, self).__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super(ImageField, self).check(**kwargs) errors.extend(self._check_backend()) return errors def _check_backend(self): backend = get_backend_class() return backend.check() def to_python(self, data): # use FileField method return super(ImageField, self).to_python(data) def update_dimension_fields(self, instance, force=False, *args, **kwargs): _file = getattr(instance, self.attname) # we need a real file if not _file._committed: return # write `width` and `height` super(VideoField, self).update_dimension_fields(instance, force, *args, **kwargs) if not self.duration_field: return # Nothing to update if we have no file and not being forced to update. if not _file and not force: return if getattr(instance, self.duration_field) and not force: return # get duration if file is defined duration = _file.duration if _file else None # update duration setattr(instance, self.duration_field, duration) def formfield(self, **kwargs): # use normal FileFieldWidget for now return super(ImageField, self).formfield(**kwargs)
[((24, 18, 24, 28), 'django.utils.translation.ugettext', '_', ({(24, 20, 24, 27): '"""Video"""'}, {}), "('Video')", True, 'from django.utils.translation import ugettext as _\n')]
boristown/leetcode
BST.py
2e510b7913653da75cd9d10f1adce4c466e74768
class BST: def __init__(self, val=0, left=None, right=None): self.val = val self.left = left self.right = right @staticmethod def array2BST(array): ''' array:sorted array ''' n = len(array) if n == 0: return None m = n//2 left,root,right = array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def BST2array(node): ''' node:BST node ''' if not node: return [] return BST.BST2array(node.left)+[node.val]+BST.BST2array(node.right)
[]
raghu1121/SLM-Lab
test/spec/test_spec.py
58e98b6521f581515d04ebacff5226105237ed9b
from flaky import flaky
from slm_lab.experiment.control import Trial
from slm_lab.experiment.monitor import InfoSpace
from slm_lab.lib import util
from slm_lab.spec import spec_util
import os
import pandas as pd
import pytest
import sys


# helper method to run all tests in test_spec
def run_trial_test(spec_file, spec_name=False):
    spec = spec_util.get(spec_file, spec_name)
    spec = spec_util.override_test_spec(spec)
    info_space = InfoSpace()
    info_space.tick('trial')
    trial = Trial(spec, info_space)
    trial_data = trial.run()
    assert isinstance(trial_data, pd.DataFrame)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/reinforce.json', 'reinforce_mlp_cartpole'),
    ('experimental/reinforce.json', 'reinforce_rnn_cartpole'),
    # ('experimental/reinforce.json', 'reinforce_conv_breakout'),
])
def test_reinforce(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/reinforce.json', 'reinforce_mlp_pendulum'),
    ('experimental/reinforce.json', 'reinforce_rnn_pendulum'),
])
def test_reinforce_cont(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'),
    ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'),
    ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'),
    ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'),
    # ('experimental/a2c.json', 'a2c_conv_shared_breakout'),
    # ('experimental/a2c.json', 'a2c_conv_separate_breakout'),
    ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'),
])
def test_a2c(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'),
    ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'),
    ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'),
    ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'),
])
def test_a2c_cont(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'),
    ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'),
    ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'),
    ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'),
    # ('experimental/ppo.json', 'ppo_conv_shared_breakout'),
    # ('experimental/ppo.json', 'ppo_conv_separate_breakout'),
])
def test_ppo(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'),
    ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'),
    ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'),
    ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'),
])
def test_ppo_cont(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'),
    ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'),
    ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'),
    ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'),
])
def test_ppo_sil(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'),
    ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'),
    ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'),
    ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'),
])
def test_ppo_sil_cont(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/sil.json', 'sil_mlp_shared_cartpole'),
    ('experimental/sil.json', 'sil_mlp_separate_cartpole'),
    ('experimental/sil.json', 'sil_rnn_shared_cartpole'),
    ('experimental/sil.json', 'sil_rnn_separate_cartpole'),
    # ('experimental/sil.json', 'sil_conv_shared_breakout'),
    # ('experimental/sil.json', 'sil_conv_separate_breakout'),
])
def test_sil(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/sil.json', 'sil_mlp_shared_pendulum'),
    ('experimental/sil.json', 'sil_mlp_separate_pendulum'),
    ('experimental/sil.json', 'sil_rnn_shared_pendulum'),
    ('experimental/sil.json', 'sil_rnn_separate_pendulum'),
])
def test_sil_cont(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'),
    ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'),
    ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'),
    ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'),
    # ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'),
    # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'),
])
def test_sarsa(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/dqn.json', 'vanilla_dqn_cartpole'),
    ('experimental/dqn.json', 'dqn_boltzmann_cartpole'),
    ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'),
    ('experimental/dqn.json', 'drqn_boltzmann_cartpole'),
    ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'),
    # ('experimental/dqn.json', 'dqn_boltzmann_breakout'),
    # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'),
    ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'),
])
def test_dqn(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'),
    ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'),
    ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'),
    ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'),
    # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'),
    # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'),
])
def test_ddqn(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'),
    ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'),
    # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'),
    # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'),
])
def test_dueling_dqn(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'),
    ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'),
    # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'),
])
def test_hydra_dqn(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@flaky
@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/dqn.json', 'dqn_pong'),
    # ('experimental/a2c.json', 'a2c_pong'),
])
def test_atari(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('experimental/reinforce.json', 'reinforce_conv_vizdoom'),
])
def test_reinforce_vizdoom(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('base.json', 'base_case_unity'),
    ('base.json', 'base_case_openai'),
    ('random.json', 'random_cartpole'),
    ('random.json', 'random_pendulum'),
    # ('base.json', 'multi_agent'),
    # ('base.json', 'multi_agent_multi_env'),
])
def test_base(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)


@pytest.mark.parametrize('spec_file,spec_name', [
    ('base.json', 'multi_body'),
    ('base.json', 'multi_env'),
])
def test_base_multi(spec_file, spec_name):
    run_trial_test(spec_file, spec_name)
[((23, 1, 27, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(23, 25, 23, 46): '"""spec_file,spec_name"""', (23, 48, 27, 1): "[('experimental/reinforce.json', 'reinforce_mlp_cartpole'), (\n 'experimental/reinforce.json', 'reinforce_rnn_cartpole')]"}, {}), "('spec_file,spec_name', [(\n 'experimental/reinforce.json', 'reinforce_mlp_cartpole'), (\n 'experimental/reinforce.json', 'reinforce_rnn_cartpole')])", False, 'import pytest\n'), ((32, 1, 35, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(32, 25, 32, 46): '"""spec_file,spec_name"""', (32, 48, 35, 1): "[('experimental/reinforce.json', 'reinforce_mlp_pendulum'), (\n 'experimental/reinforce.json', 'reinforce_rnn_pendulum')]"}, {}), "('spec_file,spec_name', [(\n 'experimental/reinforce.json', 'reinforce_mlp_pendulum'), (\n 'experimental/reinforce.json', 'reinforce_rnn_pendulum')])", False, 'import pytest\n'), ((40, 1, 48, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(40, 25, 40, 46): '"""spec_file,spec_name"""', (40, 48, 48, 1): "[('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), (\n 'experimental/a2c.json', 'a2c_mlp_separate_cartpole'), (\n 'experimental/a2c.json', 'a2c_rnn_shared_cartpole'), (\n 'experimental/a2c.json', 'a2c_rnn_separate_cartpole'), (\n 'experimental/a2c.json', 'a2c_mlp_concat_cartpole')]"}, {}), "('spec_file,spec_name', [('experimental/a2c.json',\n 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json',\n 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json',\n 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json',\n 'a2c_rnn_separate_cartpole'), ('experimental/a2c.json',\n 'a2c_mlp_concat_cartpole')])", False, 'import pytest\n'), ((53, 1, 58, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(53, 25, 53, 46): '"""spec_file,spec_name"""', (53, 48, 58, 1): "[('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), (\n 'experimental/a2c.json', 'a2c_mlp_separate_pendulum'), (\n 'experimental/a2c.json', 'a2c_rnn_shared_pendulum'), (\n 'experimental/a2c.json', 'a2c_rnn_separate_pendulum')]"}, {}), "('spec_file,spec_name', [('experimental/a2c.json',\n 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json',\n 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json',\n 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json',\n 'a2c_rnn_separate_pendulum')])", False, 'import pytest\n'), ((63, 1, 70, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(63, 25, 63, 46): '"""spec_file,spec_name"""', (63, 48, 70, 1): "[('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), (\n 'experimental/ppo.json', 'ppo_mlp_separate_cartpole'), (\n 'experimental/ppo.json', 'ppo_rnn_shared_cartpole'), (\n 'experimental/ppo.json', 'ppo_rnn_separate_cartpole')]"}, {}), "('spec_file,spec_name', [('experimental/ppo.json',\n 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json',\n 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json',\n 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json',\n 'ppo_rnn_separate_cartpole')])", False, 'import pytest\n'), ((75, 1, 80, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(75, 25, 75, 46): '"""spec_file,spec_name"""', (75, 48, 80, 1): "[('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), (\n 'experimental/ppo.json', 'ppo_mlp_separate_pendulum'), (\n 'experimental/ppo.json', 'ppo_rnn_shared_pendulum'), (\n 'experimental/ppo.json', 'ppo_rnn_separate_pendulum')]"}, {}), "('spec_file,spec_name', [('experimental/ppo.json',\n 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json',\n 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json',\n 
'ppo_rnn_shared_pendulum'), ('experimental/ppo.json',\n 'ppo_rnn_separate_pendulum')])", False, 'import pytest\n'), ((86, 1, 91, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(86, 25, 86, 46): '"""spec_file,spec_name"""', (86, 48, 91, 1): "[('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), (\n 'experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), (\n 'experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), (\n 'experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole')]"}, {}), "('spec_file,spec_name', [(\n 'experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), (\n 'experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), (\n 'experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), (\n 'experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole')])", False, 'import pytest\n'), ((97, 1, 102, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(97, 25, 97, 46): '"""spec_file,spec_name"""', (97, 48, 102, 1): "[('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), (\n 'experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), (\n 'experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), (\n 'experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum')]"}, {}), "('spec_file,spec_name', [(\n 'experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), (\n 'experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), (\n 'experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), (\n 'experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum')])", False, 'import pytest\n'), ((108, 1, 115, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(108, 25, 108, 46): '"""spec_file,spec_name"""', (108, 48, 115, 1): "[('experimental/sil.json', 'sil_mlp_shared_cartpole'), (\n 'experimental/sil.json', 'sil_mlp_separate_cartpole'), (\n 'experimental/sil.json', 'sil_rnn_shared_cartpole'), (\n 'experimental/sil.json', 'sil_rnn_separate_cartpole')]"}, {}), "('spec_file,spec_name', [('experimental/sil.json',\n 'sil_mlp_shared_cartpole'), ('experimental/sil.json',\n 'sil_mlp_separate_cartpole'), ('experimental/sil.json',\n 'sil_rnn_shared_cartpole'), ('experimental/sil.json',\n 'sil_rnn_separate_cartpole')])", False, 'import pytest\n'), ((121, 1, 126, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(121, 25, 121, 46): '"""spec_file,spec_name"""', (121, 48, 126, 1): "[('experimental/sil.json', 'sil_mlp_shared_pendulum'), (\n 'experimental/sil.json', 'sil_mlp_separate_pendulum'), (\n 'experimental/sil.json', 'sil_rnn_shared_pendulum'), (\n 'experimental/sil.json', 'sil_rnn_separate_pendulum')]"}, {}), "('spec_file,spec_name', [('experimental/sil.json',\n 'sil_mlp_shared_pendulum'), ('experimental/sil.json',\n 'sil_mlp_separate_pendulum'), ('experimental/sil.json',\n 'sil_rnn_shared_pendulum'), ('experimental/sil.json',\n 'sil_rnn_separate_pendulum')])", False, 'import pytest\n'), ((131, 1, 138, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(131, 25, 131, 46): '"""spec_file,spec_name"""', (131, 48, 138, 1): "[('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), (\n 'experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), (\n 'experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), (\n 'experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole')]"}, {}), "('spec_file,spec_name', [('experimental/sarsa.json',\n 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json',\n 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json',\n 'sarsa_rnn_boltzmann_cartpole'), 
('experimental/sarsa.json',\n 'sarsa_rnn_epsilon_greedy_cartpole')])", False, 'import pytest\n'), ((143, 1, 152, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(143, 25, 143, 46): '"""spec_file,spec_name"""', (143, 48, 152, 1): "[('experimental/dqn.json', 'vanilla_dqn_cartpole'), (\n 'experimental/dqn.json', 'dqn_boltzmann_cartpole'), (\n 'experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), (\n 'experimental/dqn.json', 'drqn_boltzmann_cartpole'), (\n 'experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), (\n 'experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar')]"}, {}), "('spec_file,spec_name', [('experimental/dqn.json',\n 'vanilla_dqn_cartpole'), ('experimental/dqn.json',\n 'dqn_boltzmann_cartpole'), ('experimental/dqn.json',\n 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json',\n 'drqn_boltzmann_cartpole'), ('experimental/dqn.json',\n 'drqn_epsilon_greedy_cartpole'), ('experimental/dqn.json',\n 'dqn_stack_epsilon_greedy_lunar')])", False, 'import pytest\n'), ((157, 1, 164, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(157, 25, 157, 46): '"""spec_file,spec_name"""', (157, 48, 164, 1): "[('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), (\n 'experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), (\n 'experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), (\n 'experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole')]"}, {}), "('spec_file,spec_name', [('experimental/ddqn.json',\n 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json',\n 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json',\n 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json',\n 'ddrqn_epsilon_greedy_cartpole')])", False, 'import pytest\n'), ((169, 1, 174, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(169, 25, 169, 46): '"""spec_file,spec_name"""', (169, 48, 174, 1): "[('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), (\n 'experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole')]"}, {}), "('spec_file,spec_name', [(\n 'experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), (\n 'experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole')])", False, 'import pytest\n'), ((179, 1, 183, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(179, 25, 179, 46): '"""spec_file,spec_name"""', (179, 48, 183, 1): "[('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), (\n 'experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole')]"}, {}), "('spec_file,spec_name', [(\n 'experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), (\n 'experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole')])", False, 'import pytest\n'), ((189, 1, 192, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(189, 25, 189, 46): '"""spec_file,spec_name"""', (189, 48, 192, 1): "[('experimental/dqn.json', 'dqn_pong')]"}, {}), "('spec_file,spec_name', [('experimental/dqn.json',\n 'dqn_pong')])", False, 'import pytest\n'), ((197, 1, 199, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(197, 25, 197, 46): '"""spec_file,spec_name"""', (197, 48, 199, 1): "[('experimental/reinforce.json', 'reinforce_conv_vizdoom')]"}, {}), "('spec_file,spec_name', [(\n 'experimental/reinforce.json', 'reinforce_conv_vizdoom')])", False, 'import pytest\n'), ((204, 1, 211, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(204, 25, 204, 46): '"""spec_file,spec_name"""', (204, 48, 211, 1): "[('base.json', 'base_case_unity'), ('base.json', 'base_case_openai'), (\n 'random.json', 
'random_cartpole'), ('random.json', 'random_pendulum')]"}, {}), "('spec_file,spec_name', [('base.json',\n 'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json',\n 'random_cartpole'), ('random.json', 'random_pendulum')])", False, 'import pytest\n'), ((216, 1, 219, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(216, 25, 216, 46): '"""spec_file,spec_name"""', (216, 48, 219, 1): "[('base.json', 'multi_body'), ('base.json', 'multi_env')]"}, {}), "('spec_file,spec_name', [('base.json', 'multi_body'),\n ('base.json', 'multi_env')])", False, 'import pytest\n'), ((14, 11, 14, 46), 'slm_lab.spec.spec_util.get', 'spec_util.get', ({(14, 25, 14, 34): 'spec_file', (14, 36, 14, 45): 'spec_name'}, {}), '(spec_file, spec_name)', False, 'from slm_lab.spec import spec_util\n'), ((15, 11, 15, 45), 'slm_lab.spec.spec_util.override_test_spec', 'spec_util.override_test_spec', ({(15, 40, 15, 44): 'spec'}, {}), '(spec)', False, 'from slm_lab.spec import spec_util\n'), ((16, 17, 16, 28), 'slm_lab.experiment.monitor.InfoSpace', 'InfoSpace', ({}, {}), '()', False, 'from slm_lab.experiment.monitor import InfoSpace\n'), ((18, 12, 18, 35), 'slm_lab.experiment.control.Trial', 'Trial', ({(18, 18, 18, 22): 'spec', (18, 24, 18, 34): 'info_space'}, {}), '(spec, info_space)', False, 'from slm_lab.experiment.control import Trial\n')]
Sfairat00/training_python
test/test_modify_group.py
14562b377d19bf22fc077e02efc7e56e73785a55
from model.group import Group


def test_modify_group_name(app):
    if app.group.count() == 0:
        app.group.create(Group(name="test"))
    old_groups = app.group.get_group_list()
    app.group.modify_first_group(Group(name="New group"))
    new_groups = app.group.get_group_list()
    assert len(old_groups) == len(new_groups)


def test_modify_group_header(app):
    if app.group.count() == 0:
        app.group.create(Group(header="test"))
    old_groups = app.group.get_group_list()
    app.group.modify_first_group(Group(header="New header"))
    new_groups = app.group.get_group_list()
    assert len(old_groups) == len(new_groups)
[((8, 33, 8, 56), 'model.group.Group', 'Group', (), '', False, 'from model.group import Group\n'), ((18, 33, 18, 59), 'model.group.Group', 'Group', (), '', False, 'from model.group import Group\n'), ((6, 25, 6, 43), 'model.group.Group', 'Group', (), '', False, 'from model.group import Group\n'), ((16, 25, 16, 45), 'model.group.Group', 'Group', (), '', False, 'from model.group import Group\n')]
readmeio/metrics-sdks-python
readme_metrics/MetricsMiddleware.py
02bc6e486260641f1a62760d20370157a4928af6
import io
import time
import datetime

from readme_metrics.Metrics import Metrics
from readme_metrics.MetricsApiConfig import MetricsApiConfig
from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper
from werkzeug import Request


class MetricsMiddleware:
    """Core middleware class for ReadMe Metrics

    Attributes:
        config (MetricsApiConfig): Contains the configuration settings for the
            running middleware instance
    """

    def __init__(self, wsgi_app_reference, config: MetricsApiConfig):
        """
        Constructs and initializes MetricsMiddleware WSGI middleware to be
        passed into the currently running WSGI web server.

        Args:
            wsgi_app_reference ([type]): Reference to the current WSGI
                application, which will be wrapped
            config (MetricsApiConfig): Instance of MetricsApiConfig object
        """
        self.config = config
        self.app = wsgi_app_reference
        self.metrics_core = Metrics(config)

    def __call__(self, environ, start_response):
        """Method that is called by the running WSGI server.

        You should NOT be calling this method yourself under normal
        circumstances.
        """
        response_headers = {}
        response_status = 0
        iterable = None
        req = Request(environ)

        def _start_response(_status, _response_headers, *args):
            write = start_response(_status, _response_headers, *args)

            # Populate response info (headers & status)
            nonlocal response_headers, response_status
            response_headers = _response_headers
            response_status = _status

            return write

        try:
            req.rm_start_dt = str(datetime.datetime.utcnow())
            req.rm_start_ts = int(time.time() * 1000)

            if req.method == "POST":
                # The next 4 lines are a workaround for a serious shortcoming in the
                # WSGI spec.
                #
                # The data can only be read once, after which the socket is exhausted
                # and cannot be read again. As such, we read the data and then
                # repopulate the variable so that it can be used by other code down the
                # pipeline.
                #
                # For more info: https://stackoverflow.com/a/13106009/643951

                # the environment variable CONTENT_LENGTH may be empty or missing
                try:
                    content_length = int(environ.get("CONTENT_LENGTH", 0))
                except (ValueError):
                    content_length = 0

                content_body = environ["wsgi.input"].read(content_length)

                # guarding check to close stream
                if hasattr(environ["CONTENT_LENGTH"], "close"):
                    environ["wsgi.input"].close()

                environ["wsgi.input"] = io.BytesIO(content_body)

                req.rm_content_length = content_length
                req.rm_body = content_body

            iterable = self.app(environ, _start_response)

            for data in iterable:
                res_ctype = ""
                res_clength = 0

                htype = next(
                    (h for h in response_headers if h[0] == "Content-Type"), None
                )
                hlength = next(
                    (h for h in response_headers if h[0] == "Content-Length"), None
                )

                if htype and hlength:
                    res_ctype = htype[1]
                    res_clength = int(hlength[1])

                # Populate response body
                res = ResponseInfoWrapper(
                    response_headers,
                    response_status,
                    res_ctype,
                    res_clength,
                    data.decode("utf-8"),
                )

                # Send off data to be queued (and processed) by ReadMe if allowed
                self.metrics_core.process(req, res)

                yield data
        finally:
            # Undocumented in WSGI spec but the iterable has to be closed
            if hasattr(iterable, "close"):
                iterable.close()
[((31, 28, 31, 43), 'readme_metrics.Metrics.Metrics', 'Metrics', ({(31, 36, 31, 42): 'config'}, {}), '(config)', False, 'from readme_metrics.Metrics import Metrics\n'), ((41, 14, 41, 30), 'werkzeug.Request', 'Request', ({(41, 22, 41, 29): 'environ'}, {}), '(environ)', False, 'from werkzeug import Request\n'), ((55, 34, 55, 60), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n'), ((80, 40, 80, 64), 'io.BytesIO', 'io.BytesIO', ({(80, 51, 80, 63): 'content_body'}, {}), '(content_body)', False, 'import io\n'), ((56, 34, 56, 45), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')]
deekshaarya4/gymexperiments
kbrl.py
2d503ba14fcfba41339de25dd78d649bd12693e6
import numpy as np
import gym
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import argparse

parser = argparse.ArgumentParser(description='KBRL with KNN')
parser.add_argument('--episodes', nargs='?', type=int, default=500)
parser.add_argument('--max_timesteps', nargs='?', type=int, default=200)
parser.add_argument('environment')
args = parser.parse_args()

env = gym.make(args.environment).env
action_space = env.action_space

# hyperparameters:
epsilon = 1.0
exploration_decay = 0.98
k = 500  # number of nearest neighbors
minimum_num_iters = 500  # number of iterations used for training
num_iter = 0
max_iters = 0
gamma = 0.95
max_state_size = 15000  # because we don't know the state space size in continuous environments

# learning-related variables
states = None
actions = {}
rewards = {}
values = {}

# episode-related variables
episode_beginning = 0


def make_move(observation, reward, done):
    global states, actions, values, rewards, num_iter, episode_beginning, max_iters, epsilon
    if states is None:  # first state observed
        states = np.zeros((max_state_size, observation.size))

    if num_iter > minimum_num_iters and np.random.rand() > epsilon and values:
        # if amount of data is sufficient and values is populated (atleast one episode has been run)
        # testing phase: exploitation
        # Uses k=500 nearest neighbors to pick the action which has the highest reward
        nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters])
        distances, indices = nbrs.kneighbors(observation)

        # find the best action
        action_list = {}
        freq_list = {}
        for i in indices[0]:
            v = values[i]
            a = actions[i]
            vnew = action_list.get(a, 0) + v
            action_list[a] = vnew
            freq_list[a] = freq_list.get(a, 0) + 1

        # normalize by number of times action occured and take action with highest value
        for act in action_list:
            action_list[act] = action_list[act] / freq_list[act]
        sorted_list = [(y,x) for x,y in action_list.items()]
        sorted_list.sort(reverse=True)
        take_action = sorted_list[0][1]
    else:
        # training phase: exploration randomly picks an action
        take_action = action_space.sample()

    # populate the state present, action taken and reward obtained
    if num_iter < max_state_size:
        states[num_iter] = observation  # save the state
        actions[num_iter] = take_action  # and the action we took
        rewards[num_iter-1] = reward  # and the reward we obtained last time step
        values[num_iter-1] = 0
    num_iter += 1

    if done:
        # end of episode: calculate the value function for this episode
        val = 0
        for t in reversed(range(episode_beginning, num_iter)):
            val = gamma * val + rewards.get(t,0)
            values[t] = val
        episode_beginning = num_iter
        max_iters = min(max(max_iters, num_iter), max_state_size)

        # decay exploration probability
        epsilon *= exploration_decay
        # do not decay below 0
        epsilon = max(epsilon, 0)

    return take_action


# Ignore sklearn warnings
def warn(*args, **kwargs):
    pass


import warnings
warnings.warn = warn

reward = 0
episode_reward = 0
done = False
cumulative_reward_list = []
for i in range(args.episodes):
    observation = env.reset()
    sum_reward = 0
    for j in range(args.max_timesteps):
        env.render()
        action = make_move(observation, reward, done)
        observation, reward, done, _ = env.step(action)
        sum_reward += reward
        if done:
            break
    episode_reward = episode_reward * 0.95 + sum_reward * 0.05
    print('Reward for episode '+ str(i)+' : '+str(episode_reward))
    cumulative_reward_list.append(episode_reward)
# env.render()

plt.plot(range(0,500), cumulative_reward_list, linewidth=2)
plt.xlabel("Episodes")
plt.ylabel("Cumulative Reward")
plt.title("Performance")
plt.show()
plt.close()
[((7, 9, 7, 61), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((126, 0, 126, 22), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(126, 11, 126, 21): '"""Episodes"""'}, {}), "('Episodes')", True, 'import matplotlib.pyplot as plt\n'), ((127, 0, 127, 31), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(127, 11, 127, 30): '"""Cumulative Reward"""'}, {}), "('Cumulative Reward')", True, 'import matplotlib.pyplot as plt\n'), ((128, 0, 128, 24), 'matplotlib.pyplot.title', 'plt.title', ({(128, 10, 128, 23): '"""Performance"""'}, {}), "('Performance')", True, 'import matplotlib.pyplot as plt\n'), ((129, 0, 129, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((130, 0, 130, 11), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((13, 6, 13, 32), 'gym.make', 'gym.make', ({(13, 15, 13, 31): 'args.environment'}, {}), '(args.environment)', False, 'import gym\n'), ((39, 17, 39, 61), 'numpy.zeros', 'np.zeros', ({(39, 26, 39, 60): '(max_state_size, observation.size)'}, {}), '((max_state_size, observation.size))', True, 'import numpy as np\n'), ((41, 40, 41, 56), 'numpy.random.rand', 'np.random.rand', ({}, {}), '()', True, 'import numpy as np\n')]
vinci-project/rootShard
shardDesigner/shardTemplateDir/shardStemDir/log/elast.py
2f6633c7fb1c1b690c0a38ffbb16af0b50d532bb
import elasticsearch
from elasticsearch import Elasticsearch
from elasticsearch import helpers

import time, json, datetime, os


class elalog:
    def __init__(self, date):
        es_host = os.getenv("ES_PORT_9200_TCP_ADDR") or '<%ELASTICIP%>'
        es_port = os.getenv("ES_PORT_9200_TCP_PORT") or '9200'
        self.lastDate = date
        self.es = Elasticsearch([{'host': es_host, 'port': es_port}])

        # BLOCKS INDEX
        self.blocks_index_name = "blocks-" + date
        self.block_mapping = {
            "settings": {
                "number_of_shards": 5,
                "number_of_replicas": 0
            },
            "mappings": {
                "blocks-" + date: {
                    "properties": {
                        "@dtime": {
                            "type": "date",
                            "format": "epoch_second"
                        },
                        "hash": {
                            "type": "text"
                        },
                        "signatures": {
                            "type": "text"
                        },
                        "tcount": {
                            "type": "long"
                        },
                        "validator": {
                            "type": "text",
                            "fielddata": True
                        },
                        "bheight": {
                            "type": "long"
                        }
                    }
                }
            }
        }

        if self.es.indices.exists(self.blocks_index_name):
            try:
                self.es.indices.delete(index=self.blocks_index_name)
                self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping)
            except elasticsearch.ElasticsearchException as es1:
                print("Elastic exception on create Indicies:", es1)
        else:
            self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping)

        # TRANSACTIONS INDEX
        self.transactions_index_name = "transactions-" + date
        self.transactions_mapping = {
            "settings": {
                "number_of_shards": 5,
                "number_of_replicas": 0
            },
            "mappings": {
                "transactions-" + date: {
                    "properties": {
                        "@dtime": {
                            "type": "date",
                            "format": "epoch_second"
                        },
                        "sender": {
                            "type": "text",
                            "fielddata": True
                        },
                        "receiver": {
                            "type": "text",
                            "fielddata": True
                        },
                        "token_count": {
                            "type": "float"
                        },
                        "token_type": {
                            "type": "text",
                            "fielddata": True
                        },
                        "hash": {
                            "type": "text"
                        },
                        "block": {
                            "type": "long"
                        }
                    }
                }
            }
        }

        if self.es.indices.exists(self.transactions_index_name):
            try:
                self.es.indices.delete(index=self.transactions_index_name)
                self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping)
            except elasticsearch.ElasticsearchException as es1:
                print("Elastic exception on create Indicies:", es1)
        else:
            self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping)

        # BALANCE HISTORY
        self.balance_index_name = "balance"
        self.balance_mapping = {
            "settings": {
                "number_of_shards": 5,
                "number_of_replicas": 0
            },
            "mappings": {
                "balance": {
                    "properties": {
                        "@dtime": {
                            "type": "date",
                            "format": "epoch_second"
                        },
                        "user": {
                            "type": "text",
                            "fielddata": True
                        },
                        "balance": {
                            "type": "float"
                        }
                    }
                }
            }
        }

        if self.es.indices.exists(self.balance_index_name):
            try:
                self.es.indices.delete(index=self.balance_index_name)
                self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping)
            except elasticsearch.ElasticsearchException as es1:
                print("Elastic exception on create Indicies:", es1)
        else:
            self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping)

        # VALIDATOR STATISTIC
        self.clients_index_name = "clients"
        self.clients_mapping = {
            "settings": {
                "number_of_shards": 5,
                "number_of_replicas": 0
            },
            "mappings": {
                "clients": {
                    "properties": {
                        "@dtime": {
                            "type": "date",
                            "format": "epoch_second"
                        },
                        "ip": {
                            "type": "ip"
                        },
                        "geoip": {
                            "properties": {
                                "city_name": {
                                    "type": "text"
                                },
                                "continent_name": {
                                    "type": "text"
                                },
                                "country_iso_code": {
                                    "type": "text"
                                },
                                "location": {
                                    "type": "geo_point"
                                },
                                "region_name": {
                                    "type": "text"
                                }
                            }
                        },
                        "public_key": {
                            "type": "text",
                            "fielddata": True
                        },
                        "client_type": {
                            "type": "text",
                            "fielddata": True
                        }
                    }
                }
            }
        }

        if self.es.indices.exists(self.clients_index_name):
            try:
                self.es.indices.delete(index=self.clients_index_name)
                self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping)
            except elasticsearch.ElasticsearchException as es1:
                print("Elastic exception on create Indicies:", es1)
        else:
            self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping)

    def elasticClients(self, jsons:list):
        try:
            helpers.bulk(self.es, jsons)
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on save Validators:", es1)
        print("Save Validators in elastic!")

    def elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list, hash:str, bheight:int):
        index = 'blocks-' + self.lastDate
        estype = 'blocks-' + self.lastDate
        eljson = json.dumps({"@dtime": int(timestamp), "validator": validator, "tcount": tcount, "signatures": list(signatures), "hash": hash, "bheight": bheight}, separators=(',', ':'))
        try:
            self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson)
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on send Block:", es1)

    def elasticTransaction(self, jsons:list):
        try:
            helpers.bulk(self.es, jsons)
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on save bulk Transactions:", es1)

    def elasticBalanceHistory(self, balance:dict):
        users = balance.keys()
        jsonMas = []
        print("USER LEN:", len(users))
        for user in users:
            eljson = {"_index": "balance", "_type": "balance", "_id": user, "_source": {"@dtime": int(time.time()), "user": user, "balance": balance.get(user)}}
            jsonMas.append(eljson)
        try:
            helpers.bulk(self.es, jsonMas)
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on save balance:", es1)

    def getLastEBlock(self):
        query = {"aggs" : { "max_blnum":{"max":{"field":"bheight"}} },"size": 0 }
        try:
            answer = self.es.search(index="blocks-" + self.lastDate, doc_type="blocks-" + self.lastDate, body=query)
            if not answer["aggregations"]["max_blnum"]["value"] == None:
                return int(answer["aggregations"]["max_blnum"]["value"])
            else:
                return 0
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on search last block index:", es1)
[((13, 18, 13, 69), 'elasticsearch.Elasticsearch', 'Elasticsearch', ({(13, 32, 13, 68): "[{'host': es_host, 'port': es_port}]"}, {}), "([{'host': es_host, 'port': es_port}])", False, 'from elasticsearch import Elasticsearch\n'), ((10, 18, 10, 52), 'os.getenv', 'os.getenv', ({(10, 28, 10, 51): '"""ES_PORT_9200_TCP_ADDR"""'}, {}), "('ES_PORT_9200_TCP_ADDR')", False, 'import time, json, datetime, os\n'), ((11, 18, 11, 52), 'os.getenv', 'os.getenv', ({(11, 28, 11, 51): '"""ES_PORT_9200_TCP_PORT"""'}, {}), "('ES_PORT_9200_TCP_PORT')", False, 'import time, json, datetime, os\n'), ((203, 12, 203, 40), 'elasticsearch.helpers.bulk', 'helpers.bulk', ({(203, 25, 203, 32): 'self.es', (203, 34, 203, 39): 'jsons'}, {}), '(self.es, jsons)', False, 'from elasticsearch import helpers\n'), ((220, 12, 220, 40), 'elasticsearch.helpers.bulk', 'helpers.bulk', ({(220, 25, 220, 32): 'self.es', (220, 34, 220, 39): 'jsons'}, {}), '(self.es, jsons)', False, 'from elasticsearch import helpers\n'), ((238, 12, 238, 42), 'elasticsearch.helpers.bulk', 'helpers.bulk', ({(238, 25, 238, 32): 'self.es', (238, 34, 238, 41): 'jsonMas'}, {}), '(self.es, jsonMas)', False, 'from elasticsearch import helpers\n'), ((233, 48, 233, 59), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, json, datetime, os\n')]
dslowikowski/commcare-hq
corehq/apps/sms/tests.py
ad8885cf8dab69dc85cb64f37aeaf06106124797
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from util import clean_phone_number, clean_outgoing_sms_text
from django.test import TestCase


class UtilTestCase(TestCase):

    def setUp(self):
        pass

    def testCleanPhoneNumber(self):
        phone_number = " 324 23-23421241"
        cleaned = clean_phone_number(phone_number)
        self.assertEquals(cleaned, "+3242323421241")

    def testCleanOutgoingSMSText(self):
        text = u"+this is a test شسیبشسی"
        cleaned = clean_outgoing_sms_text(text)
        # make sure '+' and unicode get encoded for GET properly
        self.assertEquals(cleaned, "%2Bthis%20is%20a%20test%20%D8%B4%D8%B3%DB%8C%D8%A8%D8%B4%D8%B3%DB%8C")
[((13, 18, 13, 50), 'util.clean_phone_number', 'clean_phone_number', ({(13, 37, 13, 49): 'phone_number'}, {}), '(phone_number)', False, 'from util import clean_phone_number, clean_outgoing_sms_text\n'), ((18, 18, 18, 47), 'util.clean_outgoing_sms_text', 'clean_outgoing_sms_text', ({(18, 42, 18, 46): 'text'}, {}), '(text)', False, 'from util import clean_phone_number, clean_outgoing_sms_text\n')]
cjgalvin/deepchem
deepchem/models/atomic_conv.py
64993a129e7f0f78fed9500298b1828ac8a0757a
__author__ = "Joseph Gomes" __copyright__ = "Copyright 2017, Stanford University" __license__ = "MIT" import sys from deepchem.models import KerasModel from deepchem.models.layers import AtomicConvolution from deepchem.models.losses import L2Loss from tensorflow.keras.layers import Input, Layer import numpy as np import tensorflow as tf import itertools def initializeWeightsBiases(prev_layer_size, size, weights=None, biases=None, name=None): """Initializes weights and biases to be used in a fully-connected layer. Parameters ---------- prev_layer_size: int Number of features in previous layer. size: int Number of nodes in this layer. weights: tf.Tensor, optional (Default None) Weight tensor. biases: tf.Tensor, optional (Default None) Bias tensor. name: str Name for this op, optional (Defaults to 'fully_connected' if None) Returns ------- weights: tf.Variable Initialized weights. biases: tf.Variable Initialized biases. """ if weights is None: weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01) if biases is None: biases = tf.zeros([size]) w = tf.Variable(weights, name='w') b = tf.Variable(biases, name='b') return w, b class AtomicConvScore(Layer): """The scoring function used by the atomic convolution models.""" def __init__(self, atom_types, layer_sizes, **kwargs): super(AtomicConvScore, self).__init__(**kwargs) self.atom_types = atom_types self.layer_sizes = layer_sizes def build(self, input_shape): self.type_weights = [] self.type_biases = [] self.output_weights = [] self.output_biases = [] n_features = int(input_shape[0][-1]) layer_sizes = self.layer_sizes num_layers = len(layer_sizes) weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes] bias_init_consts = [0.0] * num_layers for ind, atomtype in enumerate(self.atom_types): prev_layer_size = n_features self.type_weights.append([]) self.type_biases.append([]) self.output_weights.append([]) self.output_biases.append([]) for i in range(num_layers): weight, bias = initializeWeightsBiases( prev_layer_size=prev_layer_size, size=layer_sizes[i], weights=tf.random.truncated_normal( shape=[prev_layer_size, layer_sizes[i]], stddev=weight_init_stddevs[i]), biases=tf.constant( value=bias_init_consts[i], shape=[layer_sizes[i]])) self.type_weights[ind].append(weight) self.type_biases[ind].append(bias) prev_layer_size = layer_sizes[i] weight, bias = initializeWeightsBiases(prev_layer_size, 1) self.output_weights[ind].append(weight) self.output_biases[ind].append(bias) def call(self, inputs): frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs atom_types = self.atom_types num_layers = len(self.layer_sizes) def atomnet(current_input, atomtype): prev_layer = current_input for i in range(num_layers): layer = tf.nn.bias_add( tf.matmul(prev_layer, self.type_weights[atomtype][i]), self.type_biases[atomtype][i]) layer = tf.nn.relu(layer) prev_layer = layer output_layer = tf.squeeze( tf.nn.bias_add( tf.matmul(prev_layer, self.output_weights[atomtype][0]), self.output_biases[atomtype][0])) return output_layer frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32) frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32) complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32) frag1_atomtype_energy = [] frag2_atomtype_energy = [] complex_atomtype_energy = [] for ind, atomtype in enumerate(atom_types): frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer) frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer) complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer) cond = 
tf.equal(frag1_z, atomtype) frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros)) cond = tf.equal(frag2_z, atomtype) frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros)) cond = tf.equal(complex_z, atomtype) complex_atomtype_energy.append( tf.where(cond, complex_outputs, complex_zeros)) frag1_outputs = tf.add_n(frag1_atomtype_energy) frag2_outputs = tf.add_n(frag2_atomtype_energy) complex_outputs = tf.add_n(complex_atomtype_energy) frag1_energy = tf.reduce_sum(frag1_outputs, 1) frag2_energy = tf.reduce_sum(frag2_outputs, 1) complex_energy = tf.reduce_sum(complex_outputs, 1) binding_energy = complex_energy - (frag1_energy + frag2_energy) return tf.expand_dims(binding_energy, axis=1) class AtomicConvModel(KerasModel): """Implements an Atomic Convolution Model. Implements the atomic convolutional networks as introduced in Gomes, Joseph, et al. "Atomic convolutional networks for predicting protein-ligand binding affinity." arXiv preprint arXiv:1703.10603 (2017). The atomic convolutional networks function as a variant of graph convolutions. The difference is that the "graph" here is the nearest neighbors graph in 3D space. The AtomicConvModel leverages these connections in 3D space to train models that learn to predict energetic state starting from the spatial geometry of the model. """ def __init__(self, frag1_num_atoms=70, frag2_num_atoms=634, complex_num_atoms=701, max_num_neighbors=12, batch_size=24, atom_types=[ 6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35., 53., -1. ], radial=[[ 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0 ], [0.0, 4.0, 8.0], [0.4]], layer_sizes=[32, 32, 16], learning_rate=0.001, **kwargs): """ Parameters ---------- frag1_num_atoms: int Number of atoms in first fragment frag2_num_atoms: int Number of atoms in sec max_num_neighbors: int Maximum number of neighbors possible for an atom. Recall neighbors are spatial neighbors. atom_types: list List of atoms recognized by model. Atoms are indicated by their nuclear numbers. radial: list TODO: add description layer_sizes: list TODO: add description learning_rate: float Learning rate for the model. """ # TODO: Turning off queue for now. Safe to re-activate? 
self.complex_num_atoms = complex_num_atoms self.frag1_num_atoms = frag1_num_atoms self.frag2_num_atoms = frag2_num_atoms self.max_num_neighbors = max_num_neighbors self.batch_size = batch_size self.atom_types = atom_types rp = [x for x in itertools.product(*radial)] frag1_X = Input(shape=(frag1_num_atoms, 3)) frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_z = Input(shape=(frag1_num_atoms,)) frag2_X = Input(shape=(frag2_num_atoms, 3)) frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_z = Input(shape=(frag2_num_atoms,)) complex_X = Input(shape=(complex_num_atoms, 3)) complex_nbrs = Input(shape=(complex_num_atoms, max_num_neighbors)) complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors)) complex_z = Input(shape=(complex_num_atoms,)) self._frag1_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([frag1_X, frag1_nbrs, frag1_nbrs_z]) self._frag2_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z]) self._complex_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z]) score = AtomicConvScore(self.atom_types, layer_sizes)([ self._frag1_conv, self._frag2_conv, self._complex_conv, frag1_z, frag2_z, complex_z ]) model = tf.keras.Model( inputs=[ frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs, frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z, complex_z ], outputs=score) super(AtomicConvModel, self).__init__( model, L2Loss(), batch_size=batch_size, **kwargs) def default_generator(self, dataset, epochs=1, mode='fit', deterministic=True, pad_batches=True): batch_size = self.batch_size def replace_atom_types(z): def place_holder(i): if i in self.atom_types: return i return -1 return np.array([place_holder(x) for x in z]) for epoch in range(epochs): for ind, (F_b, y_b, w_b, ids_b) in enumerate( dataset.iterbatches( batch_size, deterministic=True, pad_batches=pad_batches)): N = self.complex_num_atoms N_1 = self.frag1_num_atoms N_2 = self.frag2_num_atoms M = self.max_num_neighbors batch_size = F_b.shape[0] num_features = F_b[0][0].shape[1] frag1_X_b = np.zeros((batch_size, N_1, num_features)) for i in range(batch_size): frag1_X_b[i] = F_b[i][0] frag2_X_b = np.zeros((batch_size, N_2, num_features)) for i in range(batch_size): frag2_X_b[i] = F_b[i][3] complex_X_b = np.zeros((batch_size, N, num_features)) for i in range(batch_size): complex_X_b[i] = F_b[i][6] frag1_Nbrs = np.zeros((batch_size, N_1, M)) frag1_Z_b = np.zeros((batch_size, N_1)) for i in range(batch_size): z = replace_atom_types(F_b[i][2]) frag1_Z_b[i] = z frag1_Nbrs_Z = np.zeros((batch_size, N_1, M)) for atom in range(N_1): for i in range(batch_size): atom_nbrs = F_b[i][1].get(atom, "") frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j] frag2_Nbrs = np.zeros((batch_size, N_2, M)) frag2_Z_b = np.zeros((batch_size, N_2)) for i in range(batch_size): z = replace_atom_types(F_b[i][5]) frag2_Z_b[i] = z frag2_Nbrs_Z = np.zeros((batch_size, N_2, M)) for atom in range(N_2): for i in range(batch_size): atom_nbrs = F_b[i][4].get(atom, "") frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, 
atom_j] complex_Nbrs = np.zeros((batch_size, N, M)) complex_Z_b = np.zeros((batch_size, N)) for i in range(batch_size): z = replace_atom_types(F_b[i][8]) complex_Z_b[i] = z complex_Nbrs_Z = np.zeros((batch_size, N, M)) for atom in range(N): for i in range(batch_size): atom_nbrs = F_b[i][7].get(atom, "") complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j] inputs = [ frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b, frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b, complex_Nbrs, complex_Nbrs_Z, complex_Z_b ] y_b = np.reshape(y_b, newshape=(batch_size, 1)) yield (inputs, [y_b], [w_b])
[((51, 6, 51, 36), 'tensorflow.Variable', 'tf.Variable', (), '', True, 'import tensorflow as tf\n'), ((52, 6, 52, 35), 'tensorflow.Variable', 'tf.Variable', (), '', True, 'import tensorflow as tf\n'), ((47, 14, 47, 78), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((49, 13, 49, 29), 'tensorflow.zeros', 'tf.zeros', ({(49, 22, 49, 28): '[size]'}, {}), '([size])', True, 'import tensorflow as tf\n'), ((116, 18, 116, 58), 'tensorflow.zeros_like', 'tf.zeros_like', (), '', True, 'import tensorflow as tf\n'), ((117, 18, 117, 58), 'tensorflow.zeros_like', 'tf.zeros_like', (), '', True, 'import tensorflow as tf\n'), ((118, 20, 118, 62), 'tensorflow.zeros_like', 'tf.zeros_like', (), '', True, 'import tensorflow as tf\n'), ((137, 20, 137, 51), 'tensorflow.add_n', 'tf.add_n', ({(137, 29, 137, 50): 'frag1_atomtype_energy'}, {}), '(frag1_atomtype_energy)', True, 'import tensorflow as tf\n'), ((138, 20, 138, 51), 'tensorflow.add_n', 'tf.add_n', ({(138, 29, 138, 50): 'frag2_atomtype_energy'}, {}), '(frag2_atomtype_energy)', True, 'import tensorflow as tf\n'), ((139, 22, 139, 55), 'tensorflow.add_n', 'tf.add_n', ({(139, 31, 139, 54): 'complex_atomtype_energy'}, {}), '(complex_atomtype_energy)', True, 'import tensorflow as tf\n'), ((141, 19, 141, 50), 'tensorflow.reduce_sum', 'tf.reduce_sum', ({(141, 33, 141, 46): 'frag1_outputs', (141, 48, 141, 49): '1'}, {}), '(frag1_outputs, 1)', True, 'import tensorflow as tf\n'), ((142, 19, 142, 50), 'tensorflow.reduce_sum', 'tf.reduce_sum', ({(142, 33, 142, 46): 'frag2_outputs', (142, 48, 142, 49): '1'}, {}), '(frag2_outputs, 1)', True, 'import tensorflow as tf\n'), ((143, 21, 143, 54), 'tensorflow.reduce_sum', 'tf.reduce_sum', ({(143, 35, 143, 50): 'complex_outputs', (143, 52, 143, 53): '1'}, {}), '(complex_outputs, 1)', True, 'import tensorflow as tf\n'), ((145, 11, 145, 49), 'tensorflow.expand_dims', 'tf.expand_dims', (), '', True, 'import tensorflow as tf\n'), ((209, 14, 209, 47), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((210, 17, 210, 66), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((211, 19, 211, 68), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((212, 14, 212, 45), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((214, 14, 214, 47), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((215, 17, 215, 66), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((216, 19, 216, 68), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((217, 14, 217, 45), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((219, 16, 219, 51), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((220, 19, 220, 70), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((221, 21, 221, 72), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from tensorflow.keras.layers import Input, Layer\n'), ((222, 16, 222, 49), 'tensorflow.keras.layers.Input', 'Input', (), '', False, 'from 
tensorflow.keras.layers import Input, Layer\n'), ((241, 12, 247, 22), 'tensorflow.keras.Model', 'tf.keras.Model', (), '', True, 'import tensorflow as tf\n'), ((129, 13, 129, 40), 'tensorflow.equal', 'tf.equal', ({(129, 22, 129, 29): 'frag1_z', (129, 31, 129, 39): 'atomtype'}, {}), '(frag1_z, atomtype)', True, 'import tensorflow as tf\n'), ((131, 13, 131, 40), 'tensorflow.equal', 'tf.equal', ({(131, 22, 131, 29): 'frag2_z', (131, 31, 131, 39): 'atomtype'}, {}), '(frag2_z, atomtype)', True, 'import tensorflow as tf\n'), ((133, 13, 133, 42), 'tensorflow.equal', 'tf.equal', ({(133, 22, 133, 31): 'complex_z', (133, 33, 133, 41): 'atomtype'}, {}), '(complex_z, atomtype)', True, 'import tensorflow as tf\n'), ((224, 23, 226, 21), 'deepchem.models.layers.AtomicConvolution', 'AtomicConvolution', (), '', False, 'from deepchem.models.layers import AtomicConvolution\n'), ((228, 23, 230, 21), 'deepchem.models.layers.AtomicConvolution', 'AtomicConvolution', (), '', False, 'from deepchem.models.layers import AtomicConvolution\n'), ((232, 25, 234, 21), 'deepchem.models.layers.AtomicConvolution', 'AtomicConvolution', (), '', False, 'from deepchem.models.layers import AtomicConvolution\n'), ((249, 15, 249, 23), 'deepchem.models.losses.L2Loss', 'L2Loss', ({}, {}), '()', False, 'from deepchem.models.losses import L2Loss\n'), ((72, 31, 72, 41), 'numpy.sqrt', 'np.sqrt', ({(72, 39, 72, 40): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((107, 16, 107, 33), 'tensorflow.nn.relu', 'tf.nn.relu', ({(107, 27, 107, 32): 'layer'}, {}), '(layer)', True, 'import tensorflow as tf\n'), ((130, 35, 130, 77), 'tensorflow.where', 'tf.where', ({(130, 44, 130, 48): 'cond', (130, 50, 130, 63): 'frag1_outputs', (130, 65, 130, 76): 'frag1_zeros'}, {}), '(cond, frag1_outputs, frag1_zeros)', True, 'import tensorflow as tf\n'), ((132, 35, 132, 77), 'tensorflow.where', 'tf.where', ({(132, 44, 132, 48): 'cond', (132, 50, 132, 63): 'frag2_outputs', (132, 65, 132, 76): 'frag2_zeros'}, {}), '(cond, frag2_outputs, frag2_zeros)', True, 'import tensorflow as tf\n'), ((135, 10, 135, 56), 'tensorflow.where', 'tf.where', ({(135, 19, 135, 23): 'cond', (135, 25, 135, 40): 'complex_outputs', (135, 42, 135, 55): 'complex_zeros'}, {}), '(cond, complex_outputs, complex_zeros)', True, 'import tensorflow as tf\n'), ((208, 21, 208, 47), 'itertools.product', 'itertools.product', ({(208, 39, 208, 46): '*radial'}, {}), '(*radial)', False, 'import itertools\n'), ((279, 20, 279, 61), 'numpy.zeros', 'np.zeros', ({(279, 29, 279, 60): '(batch_size, N_1, num_features)'}, {}), '((batch_size, N_1, num_features))', True, 'import numpy as np\n'), ((283, 20, 283, 61), 'numpy.zeros', 'np.zeros', ({(283, 29, 283, 60): '(batch_size, N_2, num_features)'}, {}), '((batch_size, N_2, num_features))', True, 'import numpy as np\n'), ((287, 22, 287, 61), 'numpy.zeros', 'np.zeros', ({(287, 31, 287, 60): '(batch_size, N, num_features)'}, {}), '((batch_size, N, num_features))', True, 'import numpy as np\n'), ((291, 21, 291, 51), 'numpy.zeros', 'np.zeros', ({(291, 30, 291, 50): '(batch_size, N_1, M)'}, {}), '((batch_size, N_1, M))', True, 'import numpy as np\n'), ((292, 20, 292, 47), 'numpy.zeros', 'np.zeros', ({(292, 29, 292, 46): '(batch_size, N_1)'}, {}), '((batch_size, N_1))', True, 'import numpy as np\n'), ((296, 23, 296, 53), 'numpy.zeros', 'np.zeros', ({(296, 32, 296, 52): '(batch_size, N_1, M)'}, {}), '((batch_size, N_1, M))', True, 'import numpy as np\n'), ((304, 21, 304, 51), 'numpy.zeros', 'np.zeros', ({(304, 30, 304, 50): '(batch_size, N_2, M)'}, {}), '((batch_size, 
N_2, M))', True, 'import numpy as np\n'), ((305, 20, 305, 47), 'numpy.zeros', 'np.zeros', ({(305, 29, 305, 46): '(batch_size, N_2)'}, {}), '((batch_size, N_2))', True, 'import numpy as np\n'), ((309, 23, 309, 53), 'numpy.zeros', 'np.zeros', ({(309, 32, 309, 52): '(batch_size, N_2, M)'}, {}), '((batch_size, N_2, M))', True, 'import numpy as np\n'), ((317, 23, 317, 51), 'numpy.zeros', 'np.zeros', ({(317, 32, 317, 50): '(batch_size, N, M)'}, {}), '((batch_size, N, M))', True, 'import numpy as np\n'), ((318, 22, 318, 47), 'numpy.zeros', 'np.zeros', ({(318, 31, 318, 46): '(batch_size, N)'}, {}), '((batch_size, N))', True, 'import numpy as np\n'), ((322, 25, 322, 53), 'numpy.zeros', 'np.zeros', ({(322, 34, 322, 52): '(batch_size, N, M)'}, {}), '((batch_size, N, M))', True, 'import numpy as np\n'), ((335, 14, 335, 55), 'numpy.reshape', 'np.reshape', (), '', True, 'import numpy as np\n'), ((105, 12, 105, 65), 'tensorflow.matmul', 'tf.matmul', ({(105, 22, 105, 32): 'prev_layer', (105, 34, 105, 64): 'self.type_weights[atomtype][i]'}, {}), '(prev_layer, self.type_weights[atomtype][i])', True, 'import tensorflow as tf\n'), ((112, 14, 112, 69), 'tensorflow.matmul', 'tf.matmul', ({(112, 24, 112, 34): 'prev_layer', (112, 36, 112, 68): 'self.output_weights[atomtype][0]'}, {}), '(prev_layer, self.output_weights[atomtype][0])', True, 'import tensorflow as tf\n'), ((84, 20, 86, 46), 'tensorflow.random.truncated_normal', 'tf.random.truncated_normal', (), '', True, 'import tensorflow as tf\n'), ((87, 19, 88, 66), 'tensorflow.constant', 'tf.constant', (), '', True, 'import tensorflow as tf\n'), ((300, 51, 300, 70), 'numpy.array', 'np.array', ({(300, 60, 300, 69): 'atom_nbrs'}, {}), '(atom_nbrs)', True, 'import numpy as np\n'), ((313, 51, 313, 70), 'numpy.array', 'np.array', ({(313, 60, 313, 69): 'atom_nbrs'}, {}), '(atom_nbrs)', True, 'import numpy as np\n'), ((326, 53, 326, 72), 'numpy.array', 'np.array', ({(326, 62, 326, 71): 'atom_nbrs'}, {}), '(atom_nbrs)', True, 'import numpy as np\n')]
cotobadesign/cotoba-agent-oss
dialogue-engine/test/programytest/config/brain/test_oob.py
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import unittest from programy.config.file.yaml_file import YamlConfigurationFile from programy.config.brain.oob import BrainOOBConfiguration from programy.clients.events.console.config import ConsoleConfiguration class BrainOOBConfigurationTests(unittest.TestCase): def test_oob_with_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(""" brain: oobs: default: classname: programy.oob.defaults.default.DefaultOutOfBandProcessor """, ConsoleConfiguration(), ".") brain_config = yaml.get_section("brain") self.assertIsNotNone(brain_config) oobs_config = yaml.get_section("oobs", brain_config) self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration("default") oob_config.load_config_section(yaml, oobs_config, ".") self.assertEqual("programy.oob.defaults.default.DefaultOutOfBandProcessor", oob_config.classname) def test_default_without_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(""" brain: oobs: default: """, ConsoleConfiguration(), ".") brain_config = yaml.get_section("brain") self.assertIsNotNone(brain_config) oobs_config = yaml.get_section("oobs", brain_config) self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration("default") oob_config.load_config_section(yaml, oobs_config, ".") self.assertIsNone(oob_config.classname)
[((26, 15, 26, 38), 'programy.config.file.yaml_file.YamlConfigurationFile', 'YamlConfigurationFile', ({}, {}), '()', False, 'from programy.config.file.yaml_file import YamlConfigurationFile\n'), ((40, 21, 40, 53), 'programy.config.brain.oob.BrainOOBConfiguration', 'BrainOOBConfiguration', ({(40, 43, 40, 52): '"""default"""'}, {}), "('default')", False, 'from programy.config.brain.oob import BrainOOBConfiguration\n'), ((46, 15, 46, 38), 'programy.config.file.yaml_file.YamlConfigurationFile', 'YamlConfigurationFile', ({}, {}), '()', False, 'from programy.config.file.yaml_file import YamlConfigurationFile\n'), ((59, 21, 59, 53), 'programy.config.brain.oob.BrainOOBConfiguration', 'BrainOOBConfiguration', ({(59, 43, 59, 52): '"""default"""'}, {}), "('default')", False, 'from programy.config.brain.oob import BrainOOBConfiguration\n'), ((33, 13, 33, 35), 'programy.clients.events.console.config.ConsoleConfiguration', 'ConsoleConfiguration', ({}, {}), '()', False, 'from programy.clients.events.console.config import ConsoleConfiguration\n'), ((52, 13, 52, 35), 'programy.clients.events.console.config.ConsoleConfiguration', 'ConsoleConfiguration', ({}, {}), '()', False, 'from programy.clients.events.console.config import ConsoleConfiguration\n')]
candyninja001/pypad
pypad/active_skill/interfaces/orb_generator_asi.py
82bfc104c2524ca54cc415d37d2c21fec471838f
import abc from ...orb_attribute import OrbAttribute # Interface for active skills that create specific orb types (whether board change, orb change, orb spawn, etc) class OrbGeneratorASI(abc.ABC): @abc.abstractmethod def does_orb_generator_create_orb_attribute(self, orb_attribute: OrbAttribute) -> bool: pass
[]
DivoK/mystery
setup.py
b656eebe678c64864b2a5762765f36bddd540933
""" Core business logic for `mystery`. This code will run when the package is being built and installed. """ import json import pathlib import random import tempfile import urllib.request import typing import setuptools from setuptools.command.sdist import sdist # Load the configuration file. CONFIG_PATH = pathlib.Path('config.json') CONFIG = json.load(CONFIG_PATH.open('r')) def _get_lockfile_path() -> pathlib.Path: """ Assemble the lockfile's path. :return: lockfile path. :rtype: pathlib.Path """ return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name']) class SDistCommand(sdist): """ Will be registered as a replacement for pip's 'sdist' command. """ def run(self): dep_lock_path = _get_lockfile_path() try: dep_lock_path.unlink() except FileNotFoundError: pass super().run() def _get_package_list() -> typing.List[str]: """ Get a list of possible packages. :return: list of package names. :rtype: typing.List[str] """ try: # Get the top PyPI packages and use one of them. response = urllib.request.urlopen(CONFIG['top_pypi_packages_link']) possible_packages_raw = response.read() except urllib.request.URLError: # Use the offline backup file. with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file: possible_packages_raw = backup_file.read() return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']] def _choose_mystery_package() -> str: """ Choose the underlying mysterious package and handle the lockfile's state. :return: mystery package name. :rtype: str """ # To keep the chosen dependency consistent in between setup.py runs, 'mystery' uses a temporary lockfile. dep_lock_path = _get_lockfile_path() if dep_lock_path.exists(): # Use the locked package and unlink the lockfile. chosen_package = dep_lock_path.read_text().strip() dep_lock_path.unlink() else: # Choose a package and create the lockfile. possible_packages = _get_package_list() chosen_package = random.choice( [package['project'] for package in possible_packages] ) dep_lock_path.write_text(chosen_package) # Lock the chosen package of course. return chosen_package def _fix_package_name(package_name: str) -> str: """ Fix the package name so it could be placed in the __init__.py file. :param package_name: mystery package name. :type package_name: str :return: fixed mystery package name. :rtype: str """ # Transform to eligible package name. fixed_package_name = package_name.replace('-', '_') # Special case for the 'backports' modules. if fixed_package_name.startswith('backports_'): fixed_package_name.replace('_', '.', 1) return fixed_package_name def _write_init_py(package_name: str) -> None: """ Dynamically write the __init__.py for the package using the chosen package. :param chosen_package: mystery package name. :type chosen_package: str :rtype: None """ package_name = _fix_package_name(package_name) init_py_path = pathlib.Path('mystery') init_py_path.mkdir(exist_ok=True) init_py_path = init_py_path / '__init__.py' init_py_path.write_text( f''' # Here we're trying to import the mystery package (it's "{package_name}" this time). # If it exists, overwrite 'mystery' in 'sys.modules'. Else, print there was an error. import sys try: import {package_name} except ImportError as error: print('Internal error:', error) print("The mystery package wasn't playing nice. Sorry!") print('Hint: you can always try to reinstall mystery and get a different package!') sorry = 'try reinstalling mystery and get a different package!' 
else: sys.modules['mystery'] = {package_name} sys.modules['mystery'].__mystery_init_py__ = __file__ sys.modules['mystery'].__mystery_package_name__ = '{package_name}' del sys # We care about this only when mystery fails (and even that's inconsequential). ''' ) def _get_long_description_data() -> typing.Tuple[str, str]: """ Get data regarding the long description of the package. :return: tuple of the README.md text and the long_description type. :rtype: typing.Tuple[str, str] """ with open('README.md', 'r') as readme: return (readme.read(), 'text/markdown') CHOSEN_PACKAGE = _choose_mystery_package() _write_init_py(CHOSEN_PACKAGE) LONG_DESCRIPTION, LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data() setuptools.setup( name='mystery', version='1.0.2', description='It is a riddle, wrapped in a mystery, inside an enigma.', url='https://github.com/DivoK/mystery', author='Divo Kaplan', author_email='[email protected]', packages=setuptools.find_packages(), install_requires=[CHOSEN_PACKAGE], cmdclass={'sdist': SDistCommand}, python_requires='>=3.6', include_package_data=True, long_description=LONG_DESCRIPTION, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, keywords='mystery setuptools fun python-packages random', classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Intended Audience :: Other Audience', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
[((17, 14, 17, 41), 'pathlib.Path', 'pathlib.Path', ({(17, 27, 17, 40): '"""config.json"""'}, {}), "('config.json')", False, 'import pathlib\n'), ((112, 19, 112, 42), 'pathlib.Path', 'pathlib.Path', ({(112, 32, 112, 41): '"""mystery"""'}, {}), "('mystery')", False, 'import pathlib\n'), ((79, 25, 81, 9), 'random.choice', 'random.choice', ({(80, 12, 80, 65): "[package['project'] for package in possible_packages]"}, {}), "([package['project'] for package in possible_packages])", False, 'import random\n'), ((159, 13, 159, 39), 'setuptools.find_packages', 'setuptools.find_packages', ({}, {}), '()', False, 'import setuptools\n'), ((60, 11, 60, 44), 'json.loads', 'json.loads', ({(60, 22, 60, 43): 'possible_packages_raw'}, {}), '(possible_packages_raw)', False, 'import json\n'), ((28, 24, 28, 45), 'tempfile.gettempdir', 'tempfile.gettempdir', ({}, {}), '()', False, 'import tempfile\n')]
CrazyIvanPro/Optimal_Transport
ADMM_primal.py
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
#!/usr/bin/env python # -*- coding: utf-8 -*- # ======================================= # File Name: ADMM_primal.py # Purpose : implementation of the ADMM method # for solving the primal problem # ======================================= from utils import get_params import numpy as np import sys def ADMM_primal(mu, nu, c, iters=10000, rho=1024, alpha=1.618): """ADMM for the primal optimal transport problem. """ # initialize m, n = c.shape pi = np.zeros((m, n)) pi_dag = np.zeros((m, n)) w = np.zeros((m, n)) u = np.zeros(m) v = np.zeros(n) rho_tilde = rho * 32 while rho_tilde >= rho: for _ in range(iters): r = ((-w + u.reshape((m, 1)) + v.reshape((1, n)) - c) / rho + mu.reshape((m, 1)) + nu.reshape((1, n)) + pi_dag) pi = (r - ((r.sum(axis=1) - r.sum() / (m + n + 1)) / (n + 1)).reshape((m, 1)) - ((r.sum(axis=0) - r.sum() / (m + n + 1)) / (m + 1)).reshape((1, n))) pi_dag = np.maximum(pi + w / rho, 0.0) u = u + alpha * rho * (mu - pi.sum(axis=1)) v = v + alpha * rho * (nu - pi.sum(axis=0)) w = w + alpha * rho * (pi - pi_dag) rho_tilde = rho_tilde / 2 print('error_mu = %.5e' % np.linalg.norm(pi_dag.sum(axis = 1) - mu, 1)) print('error_nu = %.5e' % np.linalg.norm(pi_dag.sum(axis = 0) - nu, 1)) print('fval = %.5e' % (c * pi_dag).sum()) if __name__ == '__main__': try: print("Test...") _mu, _nu, _c = get_params(64, 'random') ADMM_primal(_mu, _nu, _c) except KeyboardInterrupt: print("Ctrl+C pressed...") sys.exit(1)
[((19, 9, 19, 25), 'numpy.zeros', 'np.zeros', ({(19, 18, 19, 24): '(m, n)'}, {}), '((m, n))', True, 'import numpy as np\n'), ((20, 13, 20, 29), 'numpy.zeros', 'np.zeros', ({(20, 22, 20, 28): '(m, n)'}, {}), '((m, n))', True, 'import numpy as np\n'), ((21, 8, 21, 24), 'numpy.zeros', 'np.zeros', ({(21, 17, 21, 23): '(m, n)'}, {}), '((m, n))', True, 'import numpy as np\n'), ((22, 8, 22, 19), 'numpy.zeros', 'np.zeros', ({(22, 17, 22, 18): 'm'}, {}), '(m)', True, 'import numpy as np\n'), ((23, 8, 23, 19), 'numpy.zeros', 'np.zeros', ({(23, 17, 23, 18): 'n'}, {}), '(n)', True, 'import numpy as np\n'), ((50, 23, 50, 47), 'utils.get_params', 'get_params', ({(50, 34, 50, 36): '64', (50, 38, 50, 46): '"""random"""'}, {}), "(64, 'random')", False, 'from utils import get_params\n'), ((34, 21, 34, 50), 'numpy.maximum', 'np.maximum', ({(34, 32, 34, 44): 'pi + w / rho', (34, 46, 34, 49): '0.0'}, {}), '(pi + w / rho, 0.0)', True, 'import numpy as np\n'), ((54, 8, 54, 19), 'sys.exit', 'sys.exit', ({(54, 17, 54, 18): '(1)'}, {}), '(1)', False, 'import sys\n')]
pombase/legacy-eg-loader
misc_scripts/CleanVCFparams.py
1a324121325ffc3b9a4c15922f7a12756a9c3206
#!/usr/bin/python import argparse parser = argparse.ArgumentParser(description='Clean up the data for a given parameter') parser.add_argument('--infile', help="Path to the VCF file", default='test.vcf') parser.add_argument('--outfile', help="Path to the new VCF file", default='test.out.vcf') parser.add_argument('--param', help="Parameter to clean", default='PL') args = parser.parse_args() fi = open(args.infile, 'r') #fo = open('Spombe.2013-01-02.filt3c.nr57-final.snps.anno-snpeff3.cleaned3.AB325691.vcf', 'w') fo = open(args.outfile, 'w') # copy header lines through unchanged; for data rows, drop the chosen FORMAT field (e.g. PL) from the FORMAT column and from every sample column for line in fi: if len(line) == 0: continue if line[0] == '#': fo.write(line) continue line = line.rstrip() v = line.split('\t') params = v[8].split(':') out = v[0:8] try: paramIndex = params.index(args.param) del params[paramIndex] out.append(':'.join(params)) for d in v[9:]: dv = d.split(':') del dv[paramIndex] out.append(':'.join(dv)) except ValueError: # the field is absent from this record, so keep it unchanged out.append(':'.join(params)) out += v[9:] fo.write("\t".join(out) + "\n") fi.close() fo.close()
[((8, 9, 8, 87), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n')]
UKPLab/acl20-dialogue-coherence-assessment
create_coherency_dataset.py
328b888855dc833b4b0c05c259ee7115f4219dbe
import math import os from copy import deepcopy from ast import literal_eval import pandas as pd from math import factorial import random from collections import Counter, defaultdict import sys from nltk import word_tokenize from tqdm import tqdm, trange import argparse import numpy as np import re import csv from sklearn.model_selection import train_test_split from swda.swda import CorpusReader, Transcript, Utterance act2word = {1:"inform",2:"question", 3:"directive", 4:"commissive"} def permute(sents, sent_DAs, amount): """ return a list of different! permuted sentences and their respective dialog acts """ """ if amount is greater than the possible amount of permutations, only the uniquely possible ones are returned """ assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal" if amount == 0: return [] permutations = [list(range(len(sents)))] amount = min(amount, factorial(len(sents))-1) for i in range(amount): permutation = np.random.permutation(len(sents)) while permutation.tolist() in permutations: permutation = np.random.permutation(len(sents)) permutations.append(permutation.tolist()) return permutations[1:] #the first one is the original, which was included s.t. won't be generated def draw_rand_sent(act_utt_df, sent_len, amount): """ df is supposed to be a pandas dataframe with colums 'act' and 'utt' (utterance), with act being a number from 1 to 4 and utt being a sentence """ permutations = [] for _ in range(amount): (utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df) sent_insert_ix = random.randint(0, sent_len-1) permutations.append((utt, da, name, ix, sent_insert_ix)) return permutations def draw_rand_sent_from_df(df): ix = random.randint(0, len(df['utt'])-1) return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix] def half_perturb(sents, sent_DAs, amount): assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal" permutations = [list(range(len(sents)))] for _ in range(amount): while True: speaker = random.randint(0,1) # choose one of the speakers speaker_ix = list(filter(lambda x: (x-speaker) % 2 == 0, range(len(sents)))) permuted_speaker_ix = np.random.permutation(speaker_ix) new_sents = list(range(len(sents))) for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix): new_sents[i_to] = i_from if (not new_sents == permutations[0]) and ( not new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))): permutations.append(new_sents) break return permutations[1:] def utterance_insertions(length, amount): possible_permutations = [] original = list(range(length)) for ix in original: for y in range(length): if ix == y: continue ix_removed = original[0:ix] + ([] if ix == length-1 else original[ix+1:]) ix_removed.insert(y, ix) possible_permutations.append(deepcopy(ix_removed)) permutations = [] for _ in range(amount): i = random.randint(0, len(possible_permutations)-1) permutations.append(possible_permutations[i]) return permutations class DailyDialogConverter: def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True): self.data_dir = data_dir self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt') self.tokenizer = tokenizer self.word2id = word2id self.output_file = None self.task = task self.ranking_dataset = ranking_dataset self.perturbation_statistics = 0 self.setname = os.path.split(data_dir)[1] assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', "wrong data dir name" def 
create_act_utt(self): dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname)) act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname)) output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.format(self.task)) df = open(dial_file, 'r') af = open(act_file, 'r') of = open(output_file, 'w') csv_writer = csv.writer(of, delimiter='|') for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118): seqs = dial.split('__eou__') seqs = seqs[:-1] if len(seqs) < 5: continue tok_seqs = [self.tokenizer(seq) for seq in seqs] tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs] tok_seqs = [self.word2id(seq) for seq in tok_seqs] acts = act.split(' ') acts = acts[:-1] acts = [int(act) for act in acts] for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)): dialog_name = "{}_{}".format(self.setname, line_count) row = (act, utt, dialog_name,utt_i) csv_writer.writerow(row) def convert_dset(self, amounts): # data_dir is supposed to be the dir with the respective train/test/val-dataset files print("Creating {} perturbations for task {}".format(amounts, self.task)) dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname)) act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname)) self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task)) root_data_dir = os.path.split(self.data_dir)[0] shuffled_path = os.path.join(root_data_dir, "shuffled_{}".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) assert os.path.isfile(dial_file) and os.path.isfile(act_file), "could not find input files" assert os.path.isfile(self.act_utt_file), "missing act_utt.txt in data_dir" with open(self.act_utt_file, 'r') as f: act_utt_df = pd.read_csv(f, sep='|', names=['act','utt','dialogue','ix']) rand_generator = lambda: draw_rand_sent_from_df(act_utt_df) df = open(dial_file, 'r') af = open(act_file, 'r') of = open(self.output_file, 'w') discarded = 0 for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118): seqs = dial.split('__eou__') seqs = seqs[:-1] if len(seqs) < 5: discarded += 1 continue tok_seqs = [self.tokenizer(seq) for seq in seqs] tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs] tok_seqs = [self.word2id(seq) for seq in tok_seqs] acts = act.split(' ') acts = acts[:-1] acts = [int(act) for act in acts] if self.task == 'up': permuted_ixs = permute(tok_seqs, acts, amounts) elif self.task == 'us': permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts) elif self.task == 'hup': permuted_ixs = half_perturb(tok_seqs, acts, amounts) elif self.task == 'ui': permuted_ixs = utterance_insertions(len(tok_seqs), amounts) shuffle_file = os.path.join(shuffled_path, "{}_{}.csv".format(self.setname, line_count)) with open(shuffle_file, "w") as f: csv_writer = csv.writer(f) for perm in permuted_ixs: if self.task == 'us': (utt, da, name, ix, insert_ix) = perm row = [name, ix,insert_ix] csv_writer.writerow(row) else: csv_writer.writerow(perm) self.perturbation_statistics += len(permuted_ixs) if self.task == 'us': for p in permuted_ixs: (insert_sent, insert_da, name, ix, insert_ix) = p a = " ".join([str(a) for a in acts]) u = str(tok_seqs) p_a = deepcopy(acts) p_a[insert_ix] = insert_da pa = " ".join([str(a) for a in p_a]) p_u = deepcopy(tok_seqs) p_u[insert_ix] = self.word2id(insert_sent) of.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u)) of.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u)) else: for p in permuted_ixs: a = " 
".join([str(a) for a in acts]) u = str(tok_seqs) pa = [acts[i] for i in p] p_a = " ".join([str(a) for a in pa]) pu = [tok_seqs[i] for i in p] p_u = str(pu) of.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u)) of.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u)) print(discarded) class SwitchboardConverter: def __init__(self, data_dir, tokenizer, word2id, task='', seed=42): self.corpus = CorpusReader(data_dir) self.data_dir = data_dir self.tokenizer = tokenizer self.word2id = word2id self.task = task self.utt_num = 0 for utt in self.corpus.iter_utterances(): self.utt_num += 1 self.trans_num = 0 for trans in self.corpus.iter_transcripts(): self.trans_num += 1 self.da2num = switchboard_da_mapping() # CAUTION: make sure that for each task the seed is the same s.t. the splits will be the same! train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed) val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed) self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs self.utt_da_pairs = [] prev_da = "%" for i, utt in enumerate(self.corpus.iter_utterances()): sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text) sentence = self.word2id(self.tokenizer(sentence)) act = utt.damsl_act_tag() if act == None: act = "%" if act == "+": act = prev_da _, swda_name = os.path.split(utt.swda_filename) swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name ix = utt.utterance_index self.utt_da_pairs.append((sentence, act, swda_name, ix)) def draw_rand_sent(self): r = random.randint(0, len(self.utt_da_pairs)-1) return self.utt_da_pairs[r] def create_vocab(self): print("Creating Vocab file for Switchboard") cnt = Counter() for utt in self.corpus.iter_utterances(): sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text) sentence = self.tokenizer(sentence) for w in sentence: cnt[w] += 1 itos_file = os.path.join(self.data_dir, "itos.txt") itosf = open(itos_file, "w") for (word, _) in cnt.most_common(25000): itosf.write("{}\n".format(word)) #getKeysByValue def swda_permute(self, sents, amount, speaker_ixs): if amount == 0: return [] permutations = [list(range(len(sents)))] segment_permutations = [] amount = min(amount, factorial(len(sents))-1) segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) for i in range(amount): while True: permutation = [] segm_perm = np.random.permutation(len(segments)) segment_permutations.append(segm_perm) for segm_ix in segm_perm: utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix)) permutation = permutation + utt_ixs if permutation not in permutations: break permutations.append(permutation) return permutations[1:] , segment_permutations #the first one is the original, which was included s.t. 
won't be generated def speaker_segment_ixs(self, speaker_ixs): i = 0 segment_indices = dict() prev_speaker = speaker_ixs[0] for j,speaker in enumerate(speaker_ixs): if speaker != prev_speaker: prev_speaker = speaker i += 1 segment_indices[j] = i return segment_indices def swda_half_perturb(self, amount, speaker_ixs): segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) segment_permutations = [] permutations = [list(segm_ixs.keys())] for _ in range(amount): speaker = random.randint(0,1) # choose one of the speakers speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments)) speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments)) #TODO: rename either speaker_ix or speaker_ixs, they are something different, but the names are too close if len(speaker_to_perm) < 2: return [] while True: permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist() new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix)) if speaker == 0 : new_segments[::2] = permuted_speaker_ix new_segments[1::2] = speaker_orig else: new_segments[1::2] = permuted_speaker_ix new_segments[::2] = speaker_orig segment_permutations.append(new_segments) permutation = [] for segm_ix in new_segments: utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix)) permutation = permutation + utt_ixs if not permutation in permutations: permutations.append(permutation) break return permutations[1:], segment_permutations def swda_utterance_insertion(self, speaker_ixs, amounts): segment_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segment_ixs.values())) segment_permutations = [] permutations = [] i = 0 for _ in range(amounts): while True: # actually: do ... while permutation not in permutations i_from = random.randint(0, len(segments)-1) i_to = random.randint(0, len(segments)-2) segm_perm = deepcopy(segments) rem_elem = segments[i_from] segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:] segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:] permutation = [] for segm_ix in segm_perm: utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix)) permutation = permutation + utt_ixs if permutation not in permutations: permutations.append(permutation) segment_permutations.append(segm_perm) break return permutations, segment_permutations def swda_utterance_sampling(self, speaker_ixs, amount): segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) permutations = [] for i in range(amount): (sentence, act, swda_name, ix) = self.draw_rand_sent() insert_ix = random.choice(segments) permutations.append((sentence, act, swda_name, ix, insert_ix)) return permutations def convert_dset(self, amounts): # create distinct train/validation/test files. 
they'll correspond to the created # splits from the constructor train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task)) val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task)) test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task)) if not os.path.exists(os.path.join(self.data_dir, 'train')): os.makedirs(os.path.join(self.data_dir, 'train')) if not os.path.exists(os.path.join(self.data_dir, 'validation')): os.makedirs(os.path.join(self.data_dir, 'validation')) if not os.path.exists(os.path.join(self.data_dir, 'test')): os.makedirs(os.path.join(self.data_dir, 'test')) trainfile = open(train_output_file, 'w') valfile = open(val_output_file, 'w') testfile = open(test_output_file, 'w') shuffled_path = os.path.join(self.data_dir, "shuffled_{}".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)): utterances = [] acts = [] speaker_ixs = [] prev_act = "%" for utt in trans.utterances: sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text) sentence = self.word2id(self.tokenizer(sentence)) utterances.append(sentence) act = utt.damsl_act_tag() if act == None: act = "%" if act == "+": act = prev_act acts.append(self.da2num[act]) prev_act = act if "A" in utt.caller: speaker_ixs.append(0) else: speaker_ixs.append(1) if self.task == 'up': permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs) elif self.task == 'us': permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts) elif self.task == 'hup': permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs) elif self.task == 'ui': permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts) swda_fname = os.path.split(trans.swda_filename)[1] shuffle_file = os.path.join(shuffled_path, swda_fname) # [:-4] with open(shuffle_file, "w") as f: csv_writer = csv.writer(f) if self.task == 'us': for perm in permuted_ixs: (utt, da, name, ix, insert_ix) = perm row = [name, ix,insert_ix] csv_writer.writerow(row) else: for perm in segment_perms: csv_writer.writerow(perm) if self.task == 'us': for p in permuted_ixs: a = " ".join([str(x) for x in acts]) u = str(utterances) insert_sent, insert_da, name, ix, insert_ix = p insert_da = self.da2num[insert_da] p_a = deepcopy(acts) p_a[insert_ix] = insert_da pa = " ".join([str(x) for x in p_a]) p_u = deepcopy(utterances) p_u[insert_ix] = insert_sent if i in self.train_ixs: trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u)) trainfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u)) if i in self.val_ixs: valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u)) valfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u)) if i in self.test_ixs: testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u)) testfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u)) else: for p in permuted_ixs: a = " ".join([str(x) for x in acts]) u = str(utterances) pa = [acts[i] for i in p] p_a = " ".join([str(x) for x in pa]) pu = [utterances[i] for i in p] p_u = str(pu) if i in self.train_ixs: trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u)) trainfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u)) if i in self.val_ixs: valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u)) valfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u)) if i in self.test_ixs: testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u)) 
testfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u)) def main(): parser = argparse.ArgumentParser() parser.add_argument("--datadir", required=True, type=str, help="""The input directory where the files of the corpus are located. """) parser.add_argument("--corpus", required=True, type=str, help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """) parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--amount', type=int, default=20, help="random seed for initialization") parser.add_argument('--word2id', action='store_true', help= "convert the words to ids") parser.add_argument('--task', required=True, type=str, default="up", help="""for which task the dataset should be created. alternatives: up (utterance permutation) us (utterance sampling) hup (half utterance petrurbation) ui (utterance insertion, nothing directly added!)""") args = parser.parse_args() random.seed(args.seed) np.random.seed(args.seed) if args.word2id: f = open(os.path.join(args.datadir, "itos.txt"), "r") word2id_dict = dict() for i, word in enumerate(f): word2id_dict[word[:-1].lower()] = i word2id = lambda x: [word2id_dict[y] for y in x] # don't convert words to ids (yet). It gets done in the glove wrapper of mtl_coherence.py else: word2id = lambda x: x tokenizer = word_tokenize if args.corpus == 'DailyDialog': converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task) converter.create_act_utt() elif args.corpus == 'Switchboard': converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed) converter.create_vocab() converter.convert_dset(amounts=args.amount) def getKeysByValue(dictOfElements, valueToFind): listOfKeys = list() for item in dictOfElements.items(): if item[1] == valueToFind: listOfKeys.append(item[0]) return listOfKeys def switchboard_da_mapping(): mapping_dict = dict({ "sd": 1, "b": 2, "sv": 3, "aa": 4, "%-": 5, "ba": 6, "qy": 7, "x": 8, "ny": 9, "fc": 10, "%": 11, "qw": 12, "nn": 13, "bk": 14, "h": 15, "qy^d": 16, "o": 17, "bh": 18, "^q": 19, "bf": 20, "na": 21, "ny^e": 22, "ad": 23, "^2": 24, "b^m": 25, "qo": 26, "qh": 27, "^h": 28, "ar": 29, "ng": 30, "nn^e": 31, "br": 32, "no": 33, "fp": 34, "qrr": 35, "arp": 36, "nd": 37, "t3": 38, "oo": 39, "co": 40, "cc": 41, "t1": 42, "bd": 43, "aap": 44, "am": 45, "^g": 46, "qw^d": 47, "fa": 48, "ft":49 }) d = defaultdict(lambda: 11) for (k, v) in mapping_dict.items(): d[k] = v return d if __name__ == "__main__": main()
[((514, 13, 514, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((547, 4, 547, 26), 'random.seed', 'random.seed', ({(547, 16, 547, 25): 'args.seed'}, {}), '(args.seed)', False, 'import random\n'), ((548, 4, 548, 29), 'numpy.random.seed', 'np.random.seed', ({(548, 19, 548, 28): 'args.seed'}, {}), '(args.seed)', True, 'import numpy as np\n'), ((629, 8, 629, 31), 'collections.defaultdict', 'defaultdict', ({(629, 20, 629, 30): 'lambda : 11'}, {}), '(lambda : 11)', False, 'from collections import Counter, defaultdict\n'), ((47, 25, 47, 54), 'random.randint', 'random.randint', ({(47, 40, 47, 41): '0', (47, 43, 47, 53): 'sent_len - 1'}, {}), '(0, sent_len - 1)', False, 'import random\n'), ((53, 11, 53, 38), 'ast.literal_eval', 'literal_eval', ({(53, 24, 53, 37): "df['utt'][ix]"}, {}), "(df['utt'][ix])", False, 'from ast import literal_eval\n'), ((98, 28, 98, 70), 'os.path.join', 'os.path.join', ({(98, 41, 98, 49): 'data_dir', (98, 51, 98, 69): '"""act_utt_name.txt"""'}, {}), "(data_dir, 'act_utt_name.txt')", False, 'import os\n'), ((118, 21, 118, 50), 'csv.writer', 'csv.writer', (), '', False, 'import csv\n'), ((154, 15, 154, 48), 'os.path.isfile', 'os.path.isfile', ({(154, 30, 154, 47): 'self.act_utt_file'}, {}), '(self.act_utt_file)', False, 'import os\n'), ((233, 22, 233, 44), 'swda.swda.CorpusReader', 'CorpusReader', ({(233, 35, 233, 43): 'data_dir'}, {}), '(data_dir)', False, 'from swda.swda import CorpusReader, Transcript, Utterance\n'), ((251, 28, 251, 102), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((279, 14, 279, 23), 'collections.Counter', 'Counter', ({}, {}), '()', False, 'from collections import Counter, defaultdict\n'), ((287, 20, 287, 59), 'os.path.join', 'os.path.join', ({(287, 33, 287, 46): 'self.data_dir', (287, 48, 287, 58): '"""itos.txt"""'}, {}), "(self.data_dir, 'itos.txt')", False, 'import os\n'), ((62, 22, 62, 41), 'random.randint', 'random.randint', ({(62, 37, 62, 38): '0', (62, 39, 62, 40): '1'}, {}), '(0, 1)', False, 'import random\n'), ((65, 34, 65, 67), 'numpy.random.permutation', 'np.random.permutation', ({(65, 56, 65, 66): 'speaker_ix'}, {}), '(speaker_ix)', True, 'import numpy as np\n'), ((107, 23, 107, 46), 'os.path.split', 'os.path.split', ({(107, 37, 107, 45): 'data_dir'}, {}), '(data_dir)', False, 'import os\n'), ((148, 24, 148, 52), 'os.path.split', 'os.path.split', ({(148, 38, 148, 51): 'self.data_dir'}, {}), '(self.data_dir)', False, 'import os\n'), ((150, 15, 150, 43), 'os.path.isdir', 'os.path.isdir', ({(150, 29, 150, 42): 'shuffled_path'}, {}), '(shuffled_path)', False, 'import os\n'), ((151, 12, 151, 35), 'os.mkdir', 'os.mkdir', ({(151, 21, 151, 34): 'shuffled_path'}, {}), '(shuffled_path)', False, 'import os\n'), ((153, 15, 153, 40), 'os.path.isfile', 'os.path.isfile', ({(153, 30, 153, 39): 'dial_file'}, {}), '(dial_file)', False, 'import os\n'), ((153, 45, 153, 69), 'os.path.isfile', 'os.path.isfile', ({(153, 60, 153, 68): 'act_file'}, {}), '(act_file)', False, 'import os\n'), ((157, 25, 157, 85), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((257, 23, 258, 37), 're.sub', 're.sub', ({(257, 30, 257, 50): '"""([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)"""', (257, 52, 257, 54): '""""""', (258, 28, 258, 36): 'utt.text'}, {}), "('([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)', '', utt.text)", False, 'import re\n'), ((265, 27, 265, 59), 'os.path.split', 'os.path.split', ({(265, 41, 265, 58): 
'utt.swda_filename'}, {}), '(utt.swda_filename)', False, 'import os\n'), ((281, 23, 282, 37), 're.sub', 're.sub', ({(281, 30, 281, 50): '"""([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)"""', (281, 52, 281, 54): '""""""', (282, 28, 282, 36): 'utt.text'}, {}), "('([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)', '', utt.text)", False, 'import re\n'), ((337, 22, 337, 41), 'random.randint', 'random.randint', ({(337, 37, 337, 38): '0', (337, 39, 337, 40): '1'}, {}), '(0, 1)', False, 'import random\n'), ((403, 24, 403, 47), 'random.choice', 'random.choice', ({(403, 38, 403, 46): 'segments'}, {}), '(segments)', False, 'import random\n'), ((426, 15, 426, 43), 'os.path.isdir', 'os.path.isdir', ({(426, 29, 426, 42): 'shuffled_path'}, {}), '(shuffled_path)', False, 'import os\n'), ((427, 12, 427, 35), 'os.mkdir', 'os.mkdir', ({(427, 21, 427, 34): 'shuffled_path'}, {}), '(shuffled_path)', False, 'import os\n'), ((459, 27, 459, 66), 'os.path.join', 'os.path.join', ({(459, 40, 459, 53): 'shuffled_path', (459, 55, 459, 65): 'swda_fname'}, {}), '(shuffled_path, swda_fname)', False, 'import os\n'), ((551, 17, 551, 55), 'os.path.join', 'os.path.join', ({(551, 30, 551, 42): 'args.datadir', (551, 44, 551, 54): '"""itos.txt"""'}, {}), "(args.datadir, 'itos.txt')", False, 'import os\n'), ((86, 41, 86, 61), 'copy.deepcopy', 'deepcopy', ({(86, 50, 86, 60): 'ix_removed'}, {}), '(ix_removed)', False, 'from copy import deepcopy\n'), ((194, 29, 194, 42), 'csv.writer', 'csv.writer', ({(194, 40, 194, 41): 'f'}, {}), '(f)', False, 'import csv\n'), ((378, 28, 378, 46), 'copy.deepcopy', 'deepcopy', ({(378, 37, 378, 45): 'segments'}, {}), '(segments)', False, 'from copy import deepcopy\n'), ((414, 30, 414, 66), 'os.path.join', 'os.path.join', ({(414, 43, 414, 56): 'self.data_dir', (414, 58, 414, 65): '"""train"""'}, {}), "(self.data_dir, 'train')", False, 'import os\n'), ((415, 24, 415, 60), 'os.path.join', 'os.path.join', ({(415, 37, 415, 50): 'self.data_dir', (415, 52, 415, 59): '"""train"""'}, {}), "(self.data_dir, 'train')", False, 'import os\n'), ((416, 30, 416, 71), 'os.path.join', 'os.path.join', ({(416, 43, 416, 56): 'self.data_dir', (416, 58, 416, 70): '"""validation"""'}, {}), "(self.data_dir, 'validation')", False, 'import os\n'), ((417, 24, 417, 65), 'os.path.join', 'os.path.join', ({(417, 37, 417, 50): 'self.data_dir', (417, 52, 417, 64): '"""validation"""'}, {}), "(self.data_dir, 'validation')", False, 'import os\n'), ((418, 30, 418, 65), 'os.path.join', 'os.path.join', ({(418, 43, 418, 56): 'self.data_dir', (418, 58, 418, 64): '"""test"""'}, {}), "(self.data_dir, 'test')", False, 'import os\n'), ((419, 24, 419, 59), 'os.path.join', 'os.path.join', ({(419, 37, 419, 50): 'self.data_dir', (419, 52, 419, 58): '"""test"""'}, {}), "(self.data_dir, 'test')", False, 'import os\n'), ((435, 27, 436, 41), 're.sub', 're.sub', ({(435, 34, 435, 54): '"""([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)"""', (435, 56, 435, 58): '""""""', (436, 32, 436, 40): 'utt.text'}, {}), "('([+/\\\\}\\\\[\\\\]]|\\\\{\\\\w)', '', utt.text)", False, 'import re\n'), ((458, 25, 458, 59), 'os.path.split', 'os.path.split', ({(458, 39, 458, 58): 'trans.swda_filename'}, {}), '(trans.swda_filename)', False, 'import os\n'), ((461, 29, 461, 42), 'csv.writer', 'csv.writer', ({(461, 40, 461, 41): 'f'}, {}), '(f)', False, 'import csv\n'), ((210, 26, 210, 40), 'copy.deepcopy', 'deepcopy', ({(210, 35, 210, 39): 'acts'}, {}), '(acts)', False, 'from copy import deepcopy\n'), ((213, 26, 213, 44), 'copy.deepcopy', 'deepcopy', ({(213, 35, 213, 43): 'tok_seqs'}, {}), '(tok_seqs)', False, 'from copy 
import deepcopy\n'), ((477, 26, 477, 40), 'copy.deepcopy', 'deepcopy', ({(477, 35, 477, 39): 'acts'}, {}), '(acts)', False, 'from copy import deepcopy\n'), ((480, 26, 480, 46), 'copy.deepcopy', 'deepcopy', ({(480, 35, 480, 45): 'utterances'}, {}), '(utterances)', False, 'from copy import deepcopy\n'), ((345, 38, 345, 76), 'numpy.random.permutation', 'np.random.permutation', ({(345, 60, 345, 75): 'speaker_to_perm'}, {}), '(speaker_to_perm)', True, 'import numpy as np\n')]
swimmio/sqlalchemy_swimm
tests/test_selections.py
d24accb7792743cf586bd7062531d108e7063eba
import typing import pytest from src import selections @pytest.mark.parametrize( 'min_time, min_bytes, expected_result', [ ( 10 * 60 * 1000, 500 * 1024 * 1024, [ (2820,), (2827,), (2832,), (2834,), (2842,), (2844,), (2851,), (2852,), (2859,), (2862,), (2872,), (2878,), (2881,), (2890,), (2897,), (2899,), (2902,), (2903,), (2907,), (2910,), (2918,), (2920,), (3166,), (3167,), (3224,), (3226,), (3228,), (3229,), (3230,), (3231,), (3233,), (3234,), (3235,), (3236,), (3239,), (3242,), (3243,), (3244,), (3245,), (3246,), (3247,), (3249,), (3251,), (3338,), ], ), ( 5 * 60 * 1000, 50 * 1024 * 1024, [ (1666,), (2819,), (2820,), (2821,), (2822,), (2823,), (2824,), (2825,), (2826,), (2827,), (2828,), (2829,), (2830,), (2831,), (2832,), (2833,), (2834,), (2835,), (2836,), (2837,), (2838,), (2839,), (2840,), (2841,), (2842,), (2843,), (2844,), (2845,), (2846,), (2847,), (2848,), (2849,), (2850,), (2851,), (2852,), (2853,), (2854,), (2855,), (2856,), (2857,), (2858,), (2859,), (2860,), (2861,), (2862,), (2863,), (2864,), (2865,), (2866,), (2867,), (2868,), (2869,), (2870,), (2871,), (2872,), (2873,), (2874,), (2875,), (2876,), (2877,), (2878,), (2879,), (2880,), (2881,), (2882,), (2883,), (2884,), (2885,), (2886,), (2887,), (2888,), (2889,), (2890,), (2891,), (2892,), (2893,), (2894,), (2895,), (2896,), (2897,), (2898,), (2899,), (2900,), (2901,), (2902,), (2903,), (2904,), (2905,), (2906,), (2907,), (2908,), (2909,), (2910,), (2911,), (2912,), (2913,), (2914,), (2915,), (2916,), (2917,), (2918,), (2919,), (2920,), (2921,), (2922,), (2923,), (2924,), (2925,), (3165,), (3166,), (3167,), (3168,), (3169,), (3170,), (3171,), (3172,), (3173,), (3174,), (3175,), (3176,), (3177,), (3178,), (3179,), (3180,), (3181,), (3182,), (3183,), (3184,), (3185,), (3186,), (3187,), (3188,), (3189,), (3190,), (3191,), (3192,), (3193,), (3194,), (3195,), (3196,), (3197,), (3198,), (3199,), (3200,), (3201,), (3202,), (3203,), (3204,), (3205,), (3206,), (3207,), (3208,), (3209,), (3210,), (3211,), (3212,), (3213,), (3214,), (3215,), (3216,), (3217,), (3218,), (3219,), (3220,), (3221,), (3222,), (3223,), (3224,), (3226,), (3227,), (3228,), (3229,), (3230,), (3231,), (3232,), (3233,), (3234,), (3235,), (3236,), (3237,), (3238,), (3239,), (3240,), (3241,), (3242,), (3243,), (3244,), (3245,), (3246,), (3247,), (3248,), (3249,), (3250,), (3251,), (3252,), (3337,), (3338,), (3340,), (3341,), (3342,), (3343,), (3344,), (3345,), (3346,), (3347,), (3348,), (3360,), (3361,), (3362,), (3363,), (3364,), (3428,), (3429,), ], ), ( 2 * 60 * 1000, 100 * 1024 * 1024, [ (2819,), (2820,), (2821,), (2822,), (2823,), (2824,), (2825,), (2826,), (2827,), (2828,), (2829,), (2830,), (2831,), (2832,), (2833,), (2834,), (2835,), (2836,), (2837,), (2838,), (2839,), (2840,), (2841,), (2842,), (2843,), (2844,), (2845,), (2846,), (2847,), (2848,), (2849,), (2850,), (2851,), (2852,), (2853,), (2854,), (2855,), (2856,), (2857,), (2858,), (2859,), (2860,), (2861,), (2862,), (2863,), (2864,), (2865,), (2866,), (2867,), (2868,), (2869,), (2870,), (2871,), (2872,), (2873,), (2874,), (2875,), (2876,), (2877,), (2878,), (2879,), (2880,), (2881,), (2882,), (2883,), (2884,), (2885,), (2886,), (2887,), (2888,), (2889,), (2890,), (2891,), (2892,), (2893,), (2894,), (2895,), (2896,), (2897,), (2898,), (2899,), (2900,), (2901,), (2902,), (2903,), (2904,), (2905,), (2906,), (2907,), (2908,), (2909,), (2910,), (2911,), (2912,), (2913,), (2914,), (2915,), (2916,), (2917,), (2918,), (2919,), (2920,), (2921,), (2922,), (2923,), (2924,), (2925,), (3165,), (3166,), (3167,), 
(3168,), (3169,), (3170,), (3171,), (3172,), (3173,), (3174,), (3175,), (3176,), (3177,), (3178,), (3179,), (3180,), (3181,), (3182,), (3183,), (3184,), (3185,), (3186,), (3187,), (3188,), (3189,), (3190,), (3191,), (3192,), (3193,), (3194,), (3195,), (3196,), (3197,), (3198,), (3199,), (3200,), (3201,), (3202,), (3203,), (3204,), (3205,), (3206,), (3207,), (3208,), (3209,), (3210,), (3211,), (3212,), (3213,), (3214,), (3215,), (3216,), (3217,), (3218,), (3219,), (3220,), (3221,), (3222,), (3223,), (3224,), (3226,), (3227,), (3228,), (3229,), (3230,), (3231,), (3232,), (3233,), (3234,), (3235,), (3236,), (3237,), (3238,), (3239,), (3240,), (3241,), (3242,), (3243,), (3244,), (3245,), (3246,), (3247,), (3248,), (3249,), (3250,), (3251,), (3252,), (3337,), (3338,), (3341,), (3342,), (3343,), (3344,), (3345,), (3346,), (3347,), (3348,), (3360,), (3361,), (3362,), (3363,), (3364,), (3428,), (3429,), ], ), ], ) def test_selections( min_time: int, min_bytes: int, expected_result: typing.List[typing.Tuple[int]], ) -> None: code_returned_rows = [tuple(row) for row in selections.selections(min_time, min_bytes)] assert code_returned_rows == expected_result
[((7, 1, 497, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(8, 4, 8, 42): '"""min_time, min_bytes, expected_result"""', (9, 4, 496, 5): '[(10 * 60 * 1000, 500 * 1024 * 1024, [(2820,), (2827,), (2832,), (2834,), (\n 2842,), (2844,), (2851,), (2852,), (2859,), (2862,), (2872,), (2878,),\n (2881,), (2890,), (2897,), (2899,), (2902,), (2903,), (2907,), (2910,),\n (2918,), (2920,), (3166,), (3167,), (3224,), (3226,), (3228,), (3229,),\n (3230,), (3231,), (3233,), (3234,), (3235,), (3236,), (3239,), (3242,),\n (3243,), (3244,), (3245,), (3246,), (3247,), (3249,), (3251,), (3338,)]\n ), (5 * 60 * 1000, 50 * 1024 * 1024, [(1666,), (2819,), (2820,), (2821,\n ), (2822,), (2823,), (2824,), (2825,), (2826,), (2827,), (2828,), (2829\n ,), (2830,), (2831,), (2832,), (2833,), (2834,), (2835,), (2836,), (\n 2837,), (2838,), (2839,), (2840,), (2841,), (2842,), (2843,), (2844,),\n (2845,), (2846,), (2847,), (2848,), (2849,), (2850,), (2851,), (2852,),\n (2853,), (2854,), (2855,), (2856,), (2857,), (2858,), (2859,), (2860,),\n (2861,), (2862,), (2863,), (2864,), (2865,), (2866,), (2867,), (2868,),\n (2869,), (2870,), (2871,), (2872,), (2873,), (2874,), (2875,), (2876,),\n (2877,), (2878,), (2879,), (2880,), (2881,), (2882,), (2883,), (2884,),\n (2885,), (2886,), (2887,), (2888,), (2889,), (2890,), (2891,), (2892,),\n (2893,), (2894,), (2895,), (2896,), (2897,), (2898,), (2899,), (2900,),\n (2901,), (2902,), (2903,), (2904,), (2905,), (2906,), (2907,), (2908,),\n (2909,), (2910,), (2911,), (2912,), (2913,), (2914,), (2915,), (2916,),\n (2917,), (2918,), (2919,), (2920,), (2921,), (2922,), (2923,), (2924,),\n (2925,), (3165,), (3166,), (3167,), (3168,), (3169,), (3170,), (3171,),\n (3172,), (3173,), (3174,), (3175,), (3176,), (3177,), (3178,), (3179,),\n (3180,), (3181,), (3182,), (3183,), (3184,), (3185,), (3186,), (3187,),\n (3188,), (3189,), (3190,), (3191,), (3192,), (3193,), (3194,), (3195,),\n (3196,), (3197,), (3198,), (3199,), (3200,), (3201,), (3202,), (3203,),\n (3204,), (3205,), (3206,), (3207,), (3208,), (3209,), (3210,), (3211,),\n (3212,), (3213,), (3214,), (3215,), (3216,), (3217,), (3218,), (3219,),\n (3220,), (3221,), (3222,), (3223,), (3224,), (3226,), (3227,), (3228,),\n (3229,), (3230,), (3231,), (3232,), (3233,), (3234,), (3235,), (3236,),\n (3237,), (3238,), (3239,), (3240,), (3241,), (3242,), (3243,), (3244,),\n (3245,), (3246,), (3247,), (3248,), (3249,), (3250,), (3251,), (3252,),\n (3337,), (3338,), (3340,), (3341,), (3342,), (3343,), (3344,), (3345,),\n (3346,), (3347,), (3348,), (3360,), (3361,), (3362,), (3363,), (3364,),\n (3428,), (3429,)]), (2 * 60 * 1000, 100 * 1024 * 1024, [(2819,), (2820,\n ), (2821,), (2822,), (2823,), (2824,), (2825,), (2826,), (2827,), (2828\n ,), (2829,), (2830,), (2831,), (2832,), (2833,), (2834,), (2835,), (\n 2836,), (2837,), (2838,), (2839,), (2840,), (2841,), (2842,), (2843,),\n (2844,), (2845,), (2846,), (2847,), (2848,), (2849,), (2850,), (2851,),\n (2852,), (2853,), (2854,), (2855,), (2856,), (2857,), (2858,), (2859,),\n (2860,), (2861,), (2862,), (2863,), (2864,), (2865,), (2866,), (2867,),\n (2868,), (2869,), (2870,), (2871,), (2872,), (2873,), (2874,), (2875,),\n (2876,), (2877,), (2878,), (2879,), (2880,), (2881,), (2882,), (2883,),\n (2884,), (2885,), (2886,), (2887,), (2888,), (2889,), (2890,), (2891,),\n (2892,), (2893,), (2894,), (2895,), (2896,), (2897,), (2898,), (2899,),\n (2900,), (2901,), (2902,), (2903,), (2904,), (2905,), (2906,), (2907,),\n (2908,), (2909,), (2910,), (2911,), (2912,), (2913,), (2914,), (2915,),\n 
(2916,), (2917,), (2918,), (2919,), (2920,), (2921,), (2922,), (2923,),\n (2924,), (2925,), (3165,), (3166,), (3167,), (3168,), (3169,), (3170,),\n (3171,), (3172,), (3173,), (3174,), (3175,), (3176,), (3177,), (3178,),\n (3179,), (3180,), (3181,), (3182,), (3183,), (3184,), (3185,), (3186,),\n (3187,), (3188,), (3189,), (3190,), (3191,), (3192,), (3193,), (3194,),\n (3195,), (3196,), (3197,), (3198,), (3199,), (3200,), (3201,), (3202,),\n (3203,), (3204,), (3205,), (3206,), (3207,), (3208,), (3209,), (3210,),\n (3211,), (3212,), (3213,), (3214,), (3215,), (3216,), (3217,), (3218,),\n (3219,), (3220,), (3221,), (3222,), (3223,), (3224,), (3226,), (3227,),\n (3228,), (3229,), (3230,), (3231,), (3232,), (3233,), (3234,), (3235,),\n (3236,), (3237,), (3238,), (3239,), (3240,), (3241,), (3242,), (3243,),\n (3244,), (3245,), (3246,), (3247,), (3248,), (3249,), (3250,), (3251,),\n (3252,), (3337,), (3338,), (3341,), (3342,), (3343,), (3344,), (3345,),\n (3346,), (3347,), (3348,), (3360,), (3361,), (3362,), (3363,), (3364,),\n (3428,), (3429,)])]'}, {}), "('min_time, min_bytes, expected_result', [(10 * 60 *\n 1000, 500 * 1024 * 1024, [(2820,), (2827,), (2832,), (2834,), (2842,),\n (2844,), (2851,), (2852,), (2859,), (2862,), (2872,), (2878,), (2881,),\n (2890,), (2897,), (2899,), (2902,), (2903,), (2907,), (2910,), (2918,),\n (2920,), (3166,), (3167,), (3224,), (3226,), (3228,), (3229,), (3230,),\n (3231,), (3233,), (3234,), (3235,), (3236,), (3239,), (3242,), (3243,),\n (3244,), (3245,), (3246,), (3247,), (3249,), (3251,), (3338,)]), (5 * \n 60 * 1000, 50 * 1024 * 1024, [(1666,), (2819,), (2820,), (2821,), (2822\n ,), (2823,), (2824,), (2825,), (2826,), (2827,), (2828,), (2829,), (\n 2830,), (2831,), (2832,), (2833,), (2834,), (2835,), (2836,), (2837,),\n (2838,), (2839,), (2840,), (2841,), (2842,), (2843,), (2844,), (2845,),\n (2846,), (2847,), (2848,), (2849,), (2850,), (2851,), (2852,), (2853,),\n (2854,), (2855,), (2856,), (2857,), (2858,), (2859,), (2860,), (2861,),\n (2862,), (2863,), (2864,), (2865,), (2866,), (2867,), (2868,), (2869,),\n (2870,), (2871,), (2872,), (2873,), (2874,), (2875,), (2876,), (2877,),\n (2878,), (2879,), (2880,), (2881,), (2882,), (2883,), (2884,), (2885,),\n (2886,), (2887,), (2888,), (2889,), (2890,), (2891,), (2892,), (2893,),\n (2894,), (2895,), (2896,), (2897,), (2898,), (2899,), (2900,), (2901,),\n (2902,), (2903,), (2904,), (2905,), (2906,), (2907,), (2908,), (2909,),\n (2910,), (2911,), (2912,), (2913,), (2914,), (2915,), (2916,), (2917,),\n (2918,), (2919,), (2920,), (2921,), (2922,), (2923,), (2924,), (2925,),\n (3165,), (3166,), (3167,), (3168,), (3169,), (3170,), (3171,), (3172,),\n (3173,), (3174,), (3175,), (3176,), (3177,), (3178,), (3179,), (3180,),\n (3181,), (3182,), (3183,), (3184,), (3185,), (3186,), (3187,), (3188,),\n (3189,), (3190,), (3191,), (3192,), (3193,), (3194,), (3195,), (3196,),\n (3197,), (3198,), (3199,), (3200,), (3201,), (3202,), (3203,), (3204,),\n (3205,), (3206,), (3207,), (3208,), (3209,), (3210,), (3211,), (3212,),\n (3213,), (3214,), (3215,), (3216,), (3217,), (3218,), (3219,), (3220,),\n (3221,), (3222,), (3223,), (3224,), (3226,), (3227,), (3228,), (3229,),\n (3230,), (3231,), (3232,), (3233,), (3234,), (3235,), (3236,), (3237,),\n (3238,), (3239,), (3240,), (3241,), (3242,), (3243,), (3244,), (3245,),\n (3246,), (3247,), (3248,), (3249,), (3250,), (3251,), (3252,), (3337,),\n (3338,), (3340,), (3341,), (3342,), (3343,), (3344,), (3345,), (3346,),\n (3347,), (3348,), (3360,), (3361,), (3362,), (3363,), (3364,), 
(3428,),\n (3429,)]), (2 * 60 * 1000, 100 * 1024 * 1024, [(2819,), (2820,), (2821,\n ), (2822,), (2823,), (2824,), (2825,), (2826,), (2827,), (2828,), (2829\n ,), (2830,), (2831,), (2832,), (2833,), (2834,), (2835,), (2836,), (\n 2837,), (2838,), (2839,), (2840,), (2841,), (2842,), (2843,), (2844,),\n (2845,), (2846,), (2847,), (2848,), (2849,), (2850,), (2851,), (2852,),\n (2853,), (2854,), (2855,), (2856,), (2857,), (2858,), (2859,), (2860,),\n (2861,), (2862,), (2863,), (2864,), (2865,), (2866,), (2867,), (2868,),\n (2869,), (2870,), (2871,), (2872,), (2873,), (2874,), (2875,), (2876,),\n (2877,), (2878,), (2879,), (2880,), (2881,), (2882,), (2883,), (2884,),\n (2885,), (2886,), (2887,), (2888,), (2889,), (2890,), (2891,), (2892,),\n (2893,), (2894,), (2895,), (2896,), (2897,), (2898,), (2899,), (2900,),\n (2901,), (2902,), (2903,), (2904,), (2905,), (2906,), (2907,), (2908,),\n (2909,), (2910,), (2911,), (2912,), (2913,), (2914,), (2915,), (2916,),\n (2917,), (2918,), (2919,), (2920,), (2921,), (2922,), (2923,), (2924,),\n (2925,), (3165,), (3166,), (3167,), (3168,), (3169,), (3170,), (3171,),\n (3172,), (3173,), (3174,), (3175,), (3176,), (3177,), (3178,), (3179,),\n (3180,), (3181,), (3182,), (3183,), (3184,), (3185,), (3186,), (3187,),\n (3188,), (3189,), (3190,), (3191,), (3192,), (3193,), (3194,), (3195,),\n (3196,), (3197,), (3198,), (3199,), (3200,), (3201,), (3202,), (3203,),\n (3204,), (3205,), (3206,), (3207,), (3208,), (3209,), (3210,), (3211,),\n (3212,), (3213,), (3214,), (3215,), (3216,), (3217,), (3218,), (3219,),\n (3220,), (3221,), (3222,), (3223,), (3224,), (3226,), (3227,), (3228,),\n (3229,), (3230,), (3231,), (3232,), (3233,), (3234,), (3235,), (3236,),\n (3237,), (3238,), (3239,), (3240,), (3241,), (3242,), (3243,), (3244,),\n (3245,), (3246,), (3247,), (3248,), (3249,), (3250,), (3251,), (3252,),\n (3337,), (3338,), (3341,), (3342,), (3343,), (3344,), (3345,), (3346,),\n (3347,), (3348,), (3360,), (3361,), (3362,), (3363,), (3364,), (3428,),\n (3429,)])])", False, 'import pytest\n'), ((503, 48, 503, 90), 'src.selections.selections', 'selections.selections', ({(503, 70, 503, 78): 'min_time', (503, 80, 503, 89): 'min_bytes'}, {}), '(min_time, min_bytes)', False, 'from src import selections\n')]
richardqiu/pyjanitor
tests/utils/test_clean_accounting_column.py
aa3150e7b8e2adc4733ea206ea9c3093e21d4025
import pytest from janitor.utils import _clean_accounting_column @pytest.mark.utils def test_clean_accounting_column(): test_str = "(1,000)" assert _clean_accounting_column(test_str) == float(-1000) @pytest.mark.utils def test_clean_accounting_column_zeroes(): test_str = "()" assert _clean_accounting_column(test_str) == 0.00
[((9, 11, 9, 45), 'janitor.utils._clean_accounting_column', '_clean_accounting_column', ({(9, 36, 9, 44): 'test_str'}, {}), '(test_str)', False, 'from janitor.utils import _clean_accounting_column\n'), ((15, 11, 15, 45), 'janitor.utils._clean_accounting_column', '_clean_accounting_column', ({(15, 36, 15, 44): 'test_str'}, {}), '(test_str)', False, 'from janitor.utils import _clean_accounting_column\n')]
icadot86/bert
downloadParagraph.py
42070209183dab3b5ff59b0dea1398a9538960f3
# coding=utf-8 import sys, getopt import urllib import requests import requests_cache import re import time from bs4 import BeautifulSoup from requests import Session sys.path.append("/home/taejoon1kim/BERT/my_bert") from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON def preprocessor(text): if "감독" in text: return text[0:text.find("감독")] elif "등장인물" in text: return text[0:text.find("등장인물")] elif "누구야" in text: return text[0:text.find("누구야")] elif "알려줘" in text: return text[0:text.find("알려줘")] elif "보여줘" in text: return text[0:text.find("보여줘")] elif "찾아줘" in text: return text[0:text.find("찾아줘")] elif "언제야" in text: return text[0:text.find("언제")] elif "어디" in text: return text[0:text.find("어디")] elif "뭐야" in text: return text[0:text.find("뭐야")] else : return text def checkQType(text): global Q_TYPE if "감독" in text or "어디서" in text or "언제" in text or "뭐야" in text: Q_TYPE = 2 elif "누구야" in text: Q_TYPE = 1 else: Q_TYPE = 3 SEARCH_RESULT['Q_TYPE'] = Q_TYPE print("QUESTION TYPE : ", Q_TYPE) WIKI_URL = "wikipedia.org" YOUTUBE_URL = "youtube.com/channel" NO_RESULT = "no_result" SEARCH_RESULT = { "WIKI" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"}, "FIRST" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"}, "YOUTUBE" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"}, "test_input.json" : f"{NO_RESULT}", "search_result.json" : f"{NO_RESULT}", "Q_TYPE" : f"{NO_RESULT}" } def downloadURL(URL): # desktop user-agent USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0" # mobile user-agent MOBILE_USER_AGENT = "Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36" headers = {"user-agent" : USER_AGENT} #headers = {"user-agent" : USER_AGENT, "cache-contorl" : "public,max-age=3600"} #headers = {"user-agent" : USER_AGENT, "cache-contorl" : "no-cache"} #s = Session() #s.headers.update(headers) resp = requests.get(URL, headers=headers) #resp = s.get(URL) results = [{"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"}] print(resp.status_code) if resp.status_code == 200: soup = BeautifulSoup(resp.content, "lxml") results = [] for g in soup.find_all('div', class_='r'): anchors = g.find_all('a') if anchors: link = anchors[0]['href'] title = g.find('h3').text item = { "title": title, "link": link } results.append(item) #print(link) global SEARCH_RESULT if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT: SEARCH_RESULT['WIKI']['title'] = title SEARCH_RESULT['WIKI']['link'] = link elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] = title SEARCH_RESULT['YOUTUBE']['link'] = link if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT: break SEARCH_RESULT['FIRST']['title'] = results[0].get('title') SEARCH_RESULT['FIRST']['link'] = results[0].get('link') else: SEARCH_RESULT['FIRST']['title'] = f"resp.status_code {resp.status_code}" return results def download(text): global cache cache = getDownloadCachePath(text) global start, Q_TYPE init_start = time.time() start = time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache) == False: if True: checkQType(text) query_text = preprocessor(text) ## 1st SEARCH query = query_text query = query.replace(' ', '+') if Q_TYPE <= 2: URL = 
f"https://google.com/search?q={query} site:wikipedia.org" else : URL = f"https://google.com/search?q={query}" print(URL) downloadURL(URL) printTime("1st Search Time") pWithoutTag = f"{NO_RESULT}" imgTag = f"{NO_RESULT}" ## 2nd SEARCH if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE > 2: URL = f"https://google.com/search?q={query} site:wikipedia.org" downloadURL(URL) if SEARCH_RESULT['WIKI']['title'] == NO_RESULT: pWithoutTag = "위키피디아가 없네요. 링크를 열어보세요" else: resp = requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code == 200: soup = BeautifulSoup(resp.content, "lxml") p = soup.find('p') pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip() pWithoutTag = re.sub('"', '', str(pWithoutTag), 0).strip() pWithoutTag = re.sub('\n', ' ', str(pWithoutTag), 0).strip() imgTag = "http:" + soup.find('a', {'class':'image'}).find('img')['src'] ## GENERATE BERT INPUT JSON_1 = "{\"version\":\"mytest_dev\",\"data\":[{\"paragraphs\":[{\"qas\":[{\"answers\":[{\"text\":\"테스트\",\"answer_start\":0}],\"id\":\"1-1\",\"question\":\"테스트\"}],\"context\":\"" JSON_2 = "\"}],\"title\":\"테스트\"}]}" FULL_JSON = JSON_1 + pWithoutTag + JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON) printTime("2nd Search Time") SEARCH_RESULT['test_input.json'] = FULL_JSON ## GENERATE SEARCH RESULT FULL_JSON = "{\"google\":[{\"title\":\"" + SEARCH_RESULT['FIRST']['title'] + "\",\"link\":\"" + SEARCH_RESULT['FIRST']['link'] + "\"}],\"wiki\":[{\"title\":\"" + SEARCH_RESULT['WIKI']['title'] + "\",\"link\":\"" + SEARCH_RESULT['WIKI']['link'] + "\"}],\"youtube\":[{\"title\":\"" + SEARCH_RESULT['YOUTUBE']['title'] + "\",\"link\":\"" + SEARCH_RESULT['YOUTUBE']['link'] + "\"}],\"Q_TYPE\":\"" + str(Q_TYPE) + "\",\"IMG_SRC\":\"" + str(imgTag) + "\"}" writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] = FULL_JSON writeCache(cache, SEARCH_RESULT) else: CACHE_RESULT = readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE = CACHE_RESULT['Q_TYPE'] print(f"[SEARCH] Total time : {format(time.time() - init_start, '0.5f')}") return Q_TYPE def writeJson(json, filePath): f = open(filePath, 'w') f.write(json) f.close() def printTime(text): global start print(f"[SEARCH] {text} : {format(time.time() - start, '0.5f')}") start = time.time() def main(argv): download(argv[1]) if __name__ == "__main__": main(sys.argv)
[((12, 0, 12, 49), 'sys.path.append', 'sys.path.append', ({(12, 16, 12, 48): '"""/home/taejoon1kim/BERT/my_bert"""'}, {}), "('/home/taejoon1kim/BERT/my_bert')", False, 'import sys, getopt\n'), ((76, 11, 76, 45), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((114, 12, 114, 38), 'utils.cacheUtils.getDownloadCachePath', 'getDownloadCachePath', ({(114, 33, 114, 37): 'text'}, {}), '(text)', False, 'from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath\n'), ((116, 17, 116, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((117, 12, 117, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((118, 4, 118, 81), 'requests_cache.install_cache', 'requests_cache.install_cache', ({(118, 33, 118, 80): '"""/home/taejoon1kim/BERT/my_bert/download_cache"""'}, {}), "('/home/taejoon1kim/BERT/my_bert/download_cache')", False, 'import requests_cache\n'), ((193, 12, 193, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((82, 15, 82, 50), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(82, 29, 82, 41): 'resp.content', (82, 43, 82, 49): '"""lxml"""'}, {}), "(resp.content, 'lxml')", False, 'from bs4 import BeautifulSoup\n'), ((173, 8, 173, 40), 'utils.cacheUtils.writeCache', 'writeCache', ({(173, 19, 173, 24): 'cache', (173, 26, 173, 39): 'SEARCH_RESULT'}, {}), '(cache, SEARCH_RESULT)', False, 'from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath\n'), ((175, 23, 175, 39), 'utils.cacheUtils.readCache', 'readCache', ({(175, 33, 175, 38): 'cache'}, {}), '(cache)', False, 'from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath\n'), ((148, 19, 148, 62), 'requests.get', 'requests.get', ({(148, 32, 148, 61): "SEARCH_RESULT['WIKI']['link']"}, {}), "(SEARCH_RESULT['WIKI']['link'])", False, 'import requests\n'), ((150, 23, 150, 58), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(150, 37, 150, 49): 'resp.content', (150, 51, 150, 57): '"""lxml"""'}, {}), "(resp.content, 'lxml')", False, 'from bs4 import BeautifulSoup\n'), ((180, 42, 180, 53), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((192, 38, 192, 49), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')]
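The download script in the row above routes questions with simple substring checks before choosing a search URL. A minimal, dependency-free sketch of that routing; the keyword tuples are illustrative assumptions, not the script's exact lists:

# Sketch of the substring-based question-type routing used in the script above.
# The keyword tuples are illustrative assumptions, not the script's exact lists.
def classify_question(text: str) -> int:
    wiki_first = ("감독", "언제", "뭐야")   # factual queries -> Wikipedia-restricted search
    person = ("누구야",)                   # person lookup
    if any(k in text for k in wiki_first):
        return 2
    if any(k in text for k in person):
        return 1
    return 3


if __name__ == "__main__":
    print(classify_question("기생충 감독 알려줘"))  # -> 2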
LucasChenLC/courseManager2
data_io.py
3f91ea72dbc0a3f3afcc88c7f0959edb6c33adf9
from xml.dom.minidom import Document, parse class InfoBatch: def __init__(self, title, pre_node_titles): self.title = title self.pre_node_titles = pre_node_titles def save_data_xml(course_list, file_path): doc = Document() courses = doc.createElement('course_list') doc.appendChild(courses) for course in course_list: single_course = doc.createElement('course') courses.appendChild(single_course) single_course_name = doc.createElement('course_name') course_name = doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course = doc.createElement('pre_course') pre_course_name = ','.join(course.pre_course) course_name = doc.createTextNode(pre_course_name) single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course = doc.createElement('after_course') after_course_name = ','.join(course.after_course) course_name = doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name) with open(file_path, 'wb+') as f: f.write(doc.toprettyxml(indent='\t', encoding='utf-8')) def load_data_xml(file_path): info_list = [] doc = parse(file_path) courses = doc.getElementsByTagName("course") for course in courses: title = course.getElementsByTagName("course_name")[0].childNodes[0].data try: pre_node_titles = course.getElementsByTagName("pre_node_titles")[0].childNodes[0].data pre_node_titles = pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles)) except IndexError: info_list.append(InfoBatch(title, [])) return info_list ''' course_list = [] course_list.append(Course('Advance Math')) course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure Oriented Programming')) course_list.append(Course('Object Oriented Programming')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list, ['Advance Math']) course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic']) course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization']) save_data_xml(course_list, 'resource/data/data.xml') '''
[((11, 10, 11, 20), 'xml.dom.minidom.Document', 'Document', ({}, {}), '()', False, 'from xml.dom.minidom import Document, parse\n'), ((42, 10, 42, 26), 'xml.dom.minidom.parse', 'parse', ({(42, 16, 42, 25): 'file_path'}, {}), '(file_path)', False, 'from xml.dom.minidom import Document, parse\n')]
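data_io.py above builds and parses XML with xml.dom.minidom. A minimal round-trip sketch of the same pattern; the element names and file path here are illustrative, not the module's exact schema:

# Minimal xml.dom.minidom round-trip, analogous to save_data_xml/load_data_xml above.
from xml.dom.minidom import Document, parse

def save(names, path="courses.xml"):
    doc = Document()
    root = doc.createElement("course_list")
    doc.appendChild(root)
    for name in names:
        node = doc.createElement("course_name")
        node.appendChild(doc.createTextNode(name))
        root.appendChild(node)
    with open(path, "wb") as f:
        f.write(doc.toprettyxml(indent="\t", encoding="utf-8"))

def load(path="courses.xml"):
    doc = parse(path)
    return [n.childNodes[0].data for n in doc.getElementsByTagName("course_name")]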
jlandrum/theheck
tests/rules/test_git_rm_local_modifications.py
d2c008b6ca14220504be95f887253ddd9f5e9f72
import pytest from theheck.rules.git_rm_local_modifications import match, get_new_command from theheck.types import Command @pytest.fixture def output(target): return ('error: the following file has local modifications:\n {}\n(use ' '--cached to keep the file, or -f to force removal)').format(target) @pytest.mark.parametrize('script, target', [ ('git rm foo', 'foo'), ('git rm foo bar', 'bar')]) def test_match(output, script, target): assert match(Command(script, output)) @pytest.mark.parametrize('script', ['git rm foo', 'git rm foo bar', 'git rm']) def test_not_match(script): assert not match(Command(script, '')) @pytest.mark.parametrize('script, target, new_command', [ ('git rm foo', 'foo', ['git rm --cached foo', 'git rm -f foo']), ('git rm foo bar', 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])]) def test_get_new_command(output, script, target, new_command): assert get_new_command(Command(script, output)) == new_command
[((12, 1, 14, 31), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(12, 25, 12, 41): '"""script, target"""', (12, 43, 14, 30): "[('git rm foo', 'foo'), ('git rm foo bar', 'bar')]"}, {}), "('script, target', [('git rm foo', 'foo'), (\n 'git rm foo bar', 'bar')])", False, 'import pytest\n'), ((19, 1, 19, 78), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(19, 25, 19, 33): '"""script"""', (19, 35, 19, 77): "['git rm foo', 'git rm foo bar', 'git rm']"}, {}), "('script', ['git rm foo', 'git rm foo bar', 'git rm'])", False, 'import pytest\n'), ((24, 1, 26, 81), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(24, 25, 24, 54): '"""script, target, new_command"""', (24, 56, 26, 80): "[('git rm foo', 'foo', ['git rm --cached foo', 'git rm -f foo']), (\n 'git rm foo bar', 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])]"}, {}), "('script, target, new_command', [('git rm foo',\n 'foo', ['git rm --cached foo', 'git rm -f foo']), ('git rm foo bar',\n 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])])", False, 'import pytest\n'), ((16, 17, 16, 40), 'theheck.types.Command', 'Command', ({(16, 25, 16, 31): 'script', (16, 33, 16, 39): 'output'}, {}), '(script, output)', False, 'from theheck.types import Command\n'), ((21, 21, 21, 40), 'theheck.types.Command', 'Command', ({(21, 29, 21, 35): 'script', (21, 37, 21, 39): '""""""'}, {}), "(script, '')", False, 'from theheck.types import Command\n'), ((28, 27, 28, 50), 'theheck.types.Command', 'Command', ({(28, 35, 28, 41): 'script', (28, 43, 28, 49): 'output'}, {}), '(script, output)', False, 'from theheck.types import Command\n')]
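The test above drives one rule through pytest.mark.parametrize. A self-contained sketch of the same pattern, using a stand-in match function rather than the real theheck rule:

# Parametrized test in the same style as the row above; `match` is a stand-in, not the real rule.
import pytest

def match(command_output):
    return "local modifications" in command_output

@pytest.mark.parametrize("output, expected", [
    ("error: the following file has local modifications: foo", True),
    ("removed 'foo'", False)])
def test_match(output, expected):
    assert match(output) is expected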
statisticsnorway/microdata-data-service
application.py
d477b7b75589d4c977771122558c948c040a1106
import logging import json_logging import tomlkit import uvicorn from fastapi import FastAPI, status from fastapi.encoders import jsonable_encoder from fastapi.openapi.docs import ( get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, ) from fastapi.responses import JSONResponse from fastapi.staticfiles import StaticFiles from starlette.responses import PlainTextResponse, Response from data_service.api.data_api import data_router from data_service.api.observability_api import observability_router from data_service.config import config from data_service.core.processor import NotFoundException from data_service.core.filters import EmptyResultSetException """ Self-hosting JavaScript and CSS for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs """ data_service_app = FastAPI(docs_url=None, redoc_url=None) data_service_app.mount("/static", StaticFiles(directory="static"), name="static") data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get("/docs", include_in_schema=False) async def custom_swagger_ui_html(): return get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + " - Swagger UI", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url="/static/swagger-ui-bundle.js", swagger_css_url="/static/swagger-ui.css", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @data_service_app.get("/redoc", include_in_schema=False) async def redoc_html(): return get_redoc_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + " - ReDoc", redoc_js_url="/static/redoc.standalone.js", ) def _get_project_meta(): with open('./pyproject.toml') as pyproject: file_contents = pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta = _get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter): """ Customized application logger """ def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util) json_log_object.update({ "message": record.getMessage() }) if "exc_info" in json_log_object: json_log_object["error.stack"] = json_log_object.pop('exc_info') del json_log_object['filename'] json_log_object["@timestamp"] = json_log_object.pop('written_at') json_log_object["loggerName"] = json_log_object.pop('logger') json_log_object["levelName"] = json_log_object.pop('level') json_log_object["schemaVersion"] = "v3" json_log_object["serviceVersion"] = str(pkg_meta['version']) json_log_object["serviceName"] = "data-service" del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['msg'] del json_log_object['module'] del json_log_object['line_no'] return json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): """ Customized request logger """ def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util) json_log_object.update({ "message": record.getMessage() }) json_log_object["@timestamp"] = json_log_object.pop('written_at') json_log_object["xRequestId"] = json_log_object.pop('correlation_id') json_log_object["url"] = json_log_object.pop('request') json_log_object["source_host"] = json_log_object.pop('remote_host') 
json_log_object["responseTime"] = json_log_object.pop('response_time_ms') json_log_object["statusCode"] = json_log_object.pop('response_status') del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['remote_user'] del json_log_object['referer'] del json_log_object['x_forwarded_for'] del json_log_object['protocol'] del json_log_object['remote_ip'] del json_log_object['request_size_b'] del json_log_object['remote_port'] del json_log_object['request_received_at'] del json_log_object['response_size_b'] del json_log_object['response_content_type'] del json_log_object['response_sent_at'] return json_log_object @data_service_app.exception_handler(EmptyResultSetException) async def empty_result_set_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return Response( status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException) async def not_found_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({"detail": "No such datastructure"}) ) @data_service_app.exception_handler(Exception) async def unknown_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return PlainTextResponse("Internal Server Error", status_code=500) @data_service_app.on_event("startup") def startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log = logging.getLogger(__name__) log.info('Started data-service') log.info(config.get_settings().print()) if __name__ == "__main__": uvicorn.run(data_service_app, host="0.0.0.0", port=8000)
[((27, 19, 27, 57), 'fastapi.FastAPI', 'FastAPI', (), '', False, 'from fastapi import FastAPI, status\n'), ((28, 34, 28, 65), 'fastapi.staticfiles.StaticFiles', 'StaticFiles', (), '', False, 'from fastapi.staticfiles import StaticFiles\n'), ((36, 11, 42, 5), 'fastapi.openapi.docs.get_swagger_ui_html', 'get_swagger_ui_html', (), '', False, 'from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html\n'), ((47, 11, 47, 48), 'fastapi.openapi.docs.get_swagger_ui_oauth2_redirect_html', 'get_swagger_ui_oauth2_redirect_html', ({}, {}), '()', False, 'from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html\n'), ((52, 11, 56, 5), 'fastapi.openapi.docs.get_redoc_html', 'get_redoc_html', (), '', False, 'from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html\n'), ((140, 10, 140, 37), 'logging.getLogger', 'logging.getLogger', ({(140, 28, 140, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((142, 11, 144, 5), 'starlette.responses.Response', 'Response', (), '', False, 'from starlette.responses import PlainTextResponse, Response\n'), ((149, 10, 149, 37), 'logging.getLogger', 'logging.getLogger', ({(149, 28, 149, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((159, 10, 159, 37), 'logging.getLogger', 'logging.getLogger', ({(159, 28, 159, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((161, 11, 161, 70), 'starlette.responses.PlainTextResponse', 'PlainTextResponse', (), '', False, 'from starlette.responses import PlainTextResponse, Response\n'), ((166, 4, 166, 79), 'json_logging.init_fastapi', 'json_logging.init_fastapi', (), '', False, 'import json_logging\n'), ((167, 4, 167, 106), 'json_logging.init_request_instrument', 'json_logging.init_request_instrument', (), '', False, 'import json_logging\n'), ((169, 4, 169, 43), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((170, 4, 170, 37), 'json_logging.config_root_logger', 'json_logging.config_root_logger', ({}, {}), '()', False, 'import json_logging\n'), ((172, 10, 172, 37), 'logging.getLogger', 'logging.getLogger', ({(172, 28, 172, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((179, 4, 179, 60), 'uvicorn.run', 'uvicorn.run', (), '', False, 'import uvicorn\n'), ((63, 11, 63, 39), 'tomlkit.parse', 'tomlkit.parse', ({(63, 25, 63, 38): 'file_contents'}, {}), '(file_contents)', False, 'import tomlkit\n'), ((153, 16, 153, 69), 'fastapi.encoders.jsonable_encoder', 'jsonable_encoder', ({(153, 33, 153, 68): "{'detail': 'No such datastructure'}"}, {}), "({'detail': 'No such datastructure'})", False, 'from fastapi.encoders import jsonable_encoder\n'), ((175, 13, 175, 34), 'data_service.config.config.get_settings', 'config.get_settings', ({}, {}), '()', False, 'from data_service.config import config\n')]
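application.py above registers per-exception handlers on the FastAPI app. A minimal sketch of that registration pattern; NotFoundException here is a stand-in class, not the service's own import:

# Sketch of the FastAPI exception-handler pattern used in application.py above.
from fastapi import FastAPI, status
from fastapi.responses import JSONResponse

app = FastAPI()

class NotFoundException(Exception):   # stand-in for the service's exception type
    pass

@app.exception_handler(NotFoundException)
async def not_found_handler(request, exc):
    return JSONResponse(status_code=status.HTTP_404_NOT_FOUND,
                        content={"detail": "No such datastructure"})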
dtborders/graspologic
graspologic/embed/n2v.py
8ea9a47cabe35ad28ec9d381e525358c2027f619
# Copyright (c) Microsoft Corporation and contributors. # Licensed under the MIT License. import logging import math import time from typing import Any, List, Optional, Tuple, Union import networkx as nx import numpy as np from ..utils import remap_node_ids def node2vec_embed( graph: Union[nx.Graph, nx.DiGraph], num_walks: int = 10, walk_length: int = 80, return_hyperparameter: float = 1.0, inout_hyperparameter: float = 1.0, dimensions: int = 128, window_size: int = 10, workers: int = 8, iterations: int = 1, interpolate_walk_lengths_by_node_degree: bool = True, random_seed: Optional[int] = None, ) -> Tuple[np.array, List[Any]]: """ Generates a node2vec embedding from a given graph. Will follow the word2vec algorithm to create the embedding. Parameters ---------- graph: Union[nx.Graph, nx.DiGraph] A networkx graph or digraph. A multigraph should be turned into a non-multigraph so that the calling user properly handles the multi-edges (i.e. aggregate weights or take last edge weight). If the graph is unweighted, the weight of each edge will default to 1. num_walks : int Number of walks per source. Default is 10. walk_length: int Length of walk per source. Default is 80. return_hyperparameter : float Return hyperparameter (p). Default is 1.0 inout_hyperparameter : float Inout hyperparameter (q). Default is 1.0 dimensions : int Dimensionality of the word vectors. Default is 128. window_size : int Maximum distance between the current and predicted word within a sentence. Default is 10. workers : int Use these many worker threads to train the model. Default is 8. iterations : int Number of epochs in stochastic gradient descent (SGD) interpolate_walk_lengths_by_node_degree : bool Use a dynamic walk length that corresponds to each nodes degree. If the node is in the bottom 20 percentile, default to a walk length of 1. If it is in the top 10 percentile, use ``walk_length``. If it is in the 20-80 percentiles, linearly interpolate between 1 and ``walk_length``. This will reduce lower degree nodes from biasing your resulting embedding. If a low degree node has the same number of walks as a high degree node (which it will if this setting is not on), then the lower degree nodes will take a smaller breadth of random walks when compared to the high degree nodes. This will result in your lower degree walks dominating your higher degree nodes. random_seed : int Seed to be used for reproducible results. Default is None and will produce a random output. Note that for a fully deterministically-reproducible run, you must also limit to a single worker thread (`workers=1`), to eliminate ordering jitter from OS thread scheduling. In addition the environment variable ``PYTHONHASHSEED`` must be set to control hash randomization. Returns ------- Tuple[np.array, List[Any]] A tuple containing a matrix, with each row index corresponding to the embedding for each node. The tuple also contains a vector containing the corresponding vertex labels for each row in the matrix. The matrix and vector are positionally correlated. Notes ----- The original reference implementation of node2vec comes from Aditya Grover from https://github.com/aditya-grover/node2vec/. Further details on the Alias Method used in this functionality can be found at https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ References ---------- .. [1] Aditya Grover and Jure Leskovec "node2vec: Scalable Feature Learning for Networks." Knowledge Discovery and Data Mining, 2016. 
""" _preconditions( graph, num_walks, walk_length, return_hyperparameter, inout_hyperparameter, dimensions, window_size, workers, iterations, interpolate_walk_lengths_by_node_degree, ) random_state = np.random.RandomState(seed=random_seed) node2vec_graph = _Node2VecGraph( graph, return_hyperparameter, inout_hyperparameter, random_state ) logging.info( f"Starting preprocessing of transition probabilities on graph with {str(len(graph.nodes()))} nodes and " f"{str(len(graph.edges()))} edges" ) start = time.time() logging.info(f"Starting at time {str(start)}") node2vec_graph._preprocess_transition_probabilities() logging.info(f"Simulating walks on graph at time {str(time.time())}") walks = node2vec_graph._simulate_walks( num_walks, walk_length, interpolate_walk_lengths_by_node_degree ) logging.info(f"Learning embeddings at time {str(time.time())}") model = _learn_embeddings( walks, dimensions, window_size, workers, iterations, random_seed ) end = time.time() logging.info( f"Completed. Ending time is {str(end)} Elapsed time is {str(start - end)}" ) labels = node2vec_graph.original_graph.nodes() remapped_labels = node2vec_graph.label_map_to_string return ( np.array([model.wv.get_vector(remapped_labels[node]) for node in labels]), labels, ) def _assert_is_positive_int(name: str, value: int): if not isinstance(value, int): raise TypeError(f"{name} must be an int") if value <= 0: raise ValueError(f"{name} must be > 0") def _assert_is_nonnegative_float(name: str, value: float): if not isinstance(value, float): raise TypeError(f"{name} must be a float") if value < 0.0: raise ValueError(f"{name} must be >= 0.0") def _preconditions( graph: Union[nx.Graph, nx.DiGraph], num_walks: int, walk_length: int, return_hyperparameter: float, inout_hyperparameter: float, dimensions: int, window_size: int, workers: int, iterations: int, interpolate_walk_lengths_by_node_degree: bool, ): if not isinstance(graph, nx.Graph): raise TypeError("graph must be a networkx Graph or DiGraph") if graph.is_multigraph(): raise ValueError( "This function does not work on multigraphs - because there are two reasonable ways to treat a " "multigraph with different behaviors, we insist that the caller create an appropriate Graph or " "DiGraph that represents the manner in which they'd like the multigraph to be treated for the " "purposes of this embedding" ) _assert_is_positive_int("num_walks", num_walks) _assert_is_positive_int("walk_length", walk_length) _assert_is_nonnegative_float("return_hyperparameter", return_hyperparameter) _assert_is_nonnegative_float("inout_hyperparameter", inout_hyperparameter) _assert_is_positive_int("dimensions", dimensions) _assert_is_positive_int("window_size", window_size) _assert_is_positive_int("workers", workers) _assert_is_positive_int("iterations", iterations) if not isinstance(interpolate_walk_lengths_by_node_degree, bool): raise TypeError("interpolate_walk_lengths_by_node_degree must be a bool") def _learn_embeddings( walks: List[Any], dimensions: int, window_size: int, workers: int, iterations: int, random_seed: Optional[int], ): """ Learn embeddings by optimizing the skip-gram objective using SGD. 
""" from gensim.models import Word2Vec walks = [list(map(str, walk)) for walk in walks] # Documentation - https://radimrehurek.com/gensim/models/word2vec.html model = Word2Vec( walks, size=dimensions, window=window_size, min_count=0, sg=1, # Training algorithm: 1 for skip-gram; otherwise CBOW workers=workers, iter=iterations, seed=random_seed, ) return model class _Node2VecGraph: """ Temporary inner state object for constructing the random walks Parameters ---------- graph: nx.Graph A networkx graph return_hyperparameter : float Return hyperparameter inout_hyperparameter : float Inout hyperparameter random_state : np.random.RandomState Random State for reproducible results. Default is None and will produce random results """ def __init__( self, graph: nx.Graph, return_hyperparameter: float, inout_hyperparameter: float, random_state: Optional[np.random.RandomState] = None, ): self.original_graph: nx.Graph = graph graph_with_new_ids, new_id_map = remap_node_ids(graph=graph) self.graph = graph_with_new_ids self.label_map_to_string = new_id_map self.is_directed = self.graph.is_directed() self.p = return_hyperparameter self.q = inout_hyperparameter self.random_state = random_state def node2vec_walk( self, walk_length: int, start_node: Any, degree_percentiles: Optional[np.ndarray], ): """ Simulate a random walk starting from start node. """ graph = self.graph alias_nodes = self.alias_nodes alias_edges = self.alias_edges walk = [start_node] # Percentiles will be provided if we are using the 'interpolate_walk_lengths_by_node_degree' feature. # the intent of the code is to default the bottom 20% of to a minimal walk length, default the top 10% to a # maximum walk length, and interpolate the inner 70% linearly from min to max. # This is to avoid having your random walks be dominated by low degree nodes. If the low degree nodes have the # same number of walks as the high degree nodes, the low degree nodes will take a smaller breadth of paths # (due to their being less nodes to choose from) and will bias your resulting Word2Vec embedding if degree_percentiles is not None: degree = nx.degree(graph, start_node) walk_length = self._get_walk_length_interpolated( degree, degree_percentiles, walk_length ) while len(walk) < walk_length: current = walk[-1] current_neighbors = sorted(graph.neighbors(current)) if len(current_neighbors) > 0: if len(walk) == 1: walk.append( current_neighbors[ _alias_draw( alias_nodes[current][0], alias_nodes[current][1], self.random_state, ) ] ) else: prev = walk[-2] next = current_neighbors[ _alias_draw( alias_edges[(prev, current)][0], alias_edges[(prev, current)][1], self.random_state, ) ] walk.append(next) else: break return walk @staticmethod def _get_walk_length_interpolated( degree: int, percentiles: list, max_walk_length: int ): """ Given a node's degree, determine the length of a walk that should be used. If the degree is less than the first element of the percentiles list, default the walk length to 1. Otherwise, if the degree is greater than the last element of the list, default it to the max_walk_length. If it falls in the middle, do a linear interpolation to decide the length of the walk. """ new_walk_length = None for i, percentile in enumerate(percentiles): # if we are below the first percentile in the list, default to a walk length of 1 if i == 0 and degree < percentile: return 1 # otherwise, find which bucket we are going to be in. 
if degree <= percentile: new_walk_length = max_walk_length * ((i * 0.1) + 0.2) break # the degree is above the last percentile if not new_walk_length: new_walk_length = max_walk_length # a walk length of 0 is invalid but can happen depending on the percentiles used if new_walk_length < 1: new_walk_length = 1 return math.floor(new_walk_length) def _simulate_walks( self, num_walks: int, walk_length: int, interpolate_walk_lengths_by_node_degree: bool = False, ): """ Repeatedly simulate random walks from each node. """ graph = self.graph walks = [] nodes = list(graph.nodes()) degree_percentiles: Optional[np.ndarray] = None if interpolate_walk_lengths_by_node_degree: degree_percentiles = np.percentile( [degree for _, degree in graph.degree()], [x for x in range(20, 90, 10)] ) for walk_iteration in range(num_walks): logging.info( "Walk iteration: " + str(walk_iteration + 1) + "/" + str(num_walks) ) self.random_state.shuffle(nodes) for node in nodes: walks.append( self.node2vec_walk( walk_length=walk_length, start_node=node, degree_percentiles=degree_percentiles, ) ) return walks def _get_alias_edge(self, source: Any, destination: Any): """ Get the alias edge setup lists for a given edge. """ graph = self.graph p = self.p q = self.q unnormalized_probs = [] for destination_neighbor in sorted(graph.neighbors(destination)): if destination_neighbor == source: unnormalized_probs.append( graph[destination][destination_neighbor].get("weight", 1) / p ) elif graph.has_edge(destination_neighbor, source): unnormalized_probs.append( graph[destination][destination_neighbor].get("weight", 1) ) else: unnormalized_probs.append( graph[destination][destination_neighbor].get("weight", 1) / q ) norm_const = sum(unnormalized_probs) normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs] return _alias_setup(normalized_probs) def _preprocess_transition_probabilities(self, weight_default: float = 1.0): """ Preprocessing of transition probabilities for guiding the random walks. 
""" graph = self.graph is_directed = self.is_directed alias_nodes = {} total_nodes = len(graph.nodes()) bucket = 0 current_node = 0 quotient = int(total_nodes / 10) logging.info( f"Beginning preprocessing of transition probabilities for {total_nodes} vertices" ) for node in graph.nodes(): current_node += 1 if current_node > bucket * quotient: bucket += 1 logging.info(f"Completed {current_node} / {total_nodes} vertices") unnormalized_probs = [ graph[node][nbr].get("weight", weight_default) for nbr in sorted(graph.neighbors(node)) ] norm_const = sum(unnormalized_probs) normalized_probs = [ float(u_prob) / norm_const for u_prob in unnormalized_probs ] alias_nodes[node] = _alias_setup(normalized_probs) logging.info( f"Completed preprocessing of transition probabilities for vertices" ) alias_edges = {} total_edges = len(graph.edges()) bucket = 0 current_edge = 0 quotient = int(total_edges / 10) logging.info( f"Beginning preprocessing of transition probabilities for {total_edges} edges" ) if is_directed: for edge in graph.edges(): current_edge += 1 if current_edge > bucket * quotient: bucket += 1 logging.info(f"Completed {current_edge} / {total_edges} edges") alias_edges[edge] = self._get_alias_edge(edge[0], edge[1]) else: for edge in graph.edges(): current_edge += 1 if current_edge > bucket * quotient: bucket += 1 logging.info(f"Completed {current_edge} / {total_edges} edges") alias_edges[edge] = self._get_alias_edge(edge[0], edge[1]) alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0]) logging.info(f"Completed preprocessing of transition probabilities for edges") self.alias_nodes = alias_nodes self.alias_edges = alias_edges return def _alias_setup(probabilities: List[float]): """ Compute utility lists for non-uniform sampling from discrete distributions. Refer to https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details """ number_of_outcomes = len(probabilities) alias = np.zeros(number_of_outcomes) sampled_probabilities = np.zeros(number_of_outcomes, dtype=int) smaller = [] larger = [] for i, prob in enumerate(probabilities): alias[i] = number_of_outcomes * prob if alias[i] < 1.0: smaller.append(i) else: larger.append(i) while len(smaller) > 0 and len(larger) > 0: small = smaller.pop() large = larger.pop() sampled_probabilities[small] = large alias[large] = alias[large] + alias[small] - 1.0 if alias[large] < 1.0: smaller.append(large) else: larger.append(large) return sampled_probabilities, alias def _alias_draw( probabilities: List[float], alias: List[float], random_state: np.random.RandomState ): """ Draw sample from a non-uniform discrete distribution using alias sampling. """ number_of_outcomes = len(probabilities) random_index = int(np.floor(random_state.rand() * number_of_outcomes)) if random_state.rand() < alias[random_index]: return random_index else: return probabilities[random_index]
[((102, 19, 102, 58), 'numpy.random.RandomState', 'np.random.RandomState', (), '', True, 'import numpy as np\n'), ((113, 12, 113, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((128, 10, 128, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((205, 12, 214, 5), 'gensim.models.Word2Vec', 'Word2Vec', (), '', False, 'from gensim.models import Word2Vec\n'), ((489, 12, 489, 40), 'numpy.zeros', 'np.zeros', ({(489, 21, 489, 39): 'number_of_outcomes'}, {}), '(number_of_outcomes)', True, 'import numpy as np\n'), ((490, 28, 490, 67), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((345, 15, 345, 42), 'math.floor', 'math.floor', ({(345, 26, 345, 41): 'new_walk_length'}, {}), '(new_walk_length)', False, 'import math\n'), ((423, 8, 425, 9), 'logging.info', 'logging.info', ({(424, 12, 424, 93): 'f"""Beginning preprocessing of transition probabilities for {total_nodes} vertices"""'}, {}), "(\n f'Beginning preprocessing of transition probabilities for {total_nodes} vertices'\n )", False, 'import logging\n'), ((441, 8, 443, 9), 'logging.info', 'logging.info', ({(442, 12, 442, 79): 'f"""Completed preprocessing of transition probabilities for vertices"""'}, {}), "(\n f'Completed preprocessing of transition probabilities for vertices')", False, 'import logging\n'), ((452, 8, 454, 9), 'logging.info', 'logging.info', ({(453, 12, 453, 90): 'f"""Beginning preprocessing of transition probabilities for {total_edges} edges"""'}, {}), "(\n f'Beginning preprocessing of transition probabilities for {total_edges} edges'\n )", False, 'import logging\n'), ((473, 8, 473, 86), 'logging.info', 'logging.info', ({(473, 21, 473, 85): 'f"""Completed preprocessing of transition probabilities for edges"""'}, {}), "(f'Completed preprocessing of transition probabilities for edges')", False, 'import logging\n'), ((279, 21, 279, 49), 'networkx.degree', 'nx.degree', ({(279, 31, 279, 36): 'graph', (279, 38, 279, 48): 'start_node'}, {}), '(graph, start_node)', True, 'import networkx as nx\n'), ((430, 16, 430, 82), 'logging.info', 'logging.info', ({(430, 29, 430, 81): 'f"""Completed {current_node} / {total_nodes} vertices"""'}, {}), "(f'Completed {current_node} / {total_nodes} vertices')", False, 'import logging\n'), ((118, 58, 118, 69), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((123, 52, 123, 63), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((460, 20, 460, 83), 'logging.info', 'logging.info', ({(460, 33, 460, 82): 'f"""Completed {current_edge} / {total_edges} edges"""'}, {}), "(f'Completed {current_edge} / {total_edges} edges')", False, 'import logging\n'), ((468, 20, 468, 83), 'logging.info', 'logging.info', ({(468, 33, 468, 82): 'f"""Completed {current_edge} / {total_edges} edges"""'}, {}), "(f'Completed {current_edge} / {total_edges} edges')", False, 'import logging\n')]
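n2v.py above samples weighted random-walk transitions with the alias method (_alias_setup/_alias_draw). A generic reimplementation of that sampler as a standalone sketch, not the library's code:

# Generic alias-method sampler: O(n) setup, O(1) draws from a discrete distribution.
import numpy as np

def alias_setup(probs):
    n = len(probs)
    alias, prob = np.zeros(n, dtype=int), np.zeros(n)
    small, large = [], []
    for i, p in enumerate(probs):
        prob[i] = n * p
        (small if prob[i] < 1.0 else large).append(i)
    while small and large:
        s, l = small.pop(), large.pop()
        alias[s] = l                         # overflow from bucket s is served by outcome l
        prob[l] = prob[l] + prob[s] - 1.0
        (small if prob[l] < 1.0 else large).append(l)
    return alias, prob

def alias_draw(alias, prob, rng):
    i = int(rng.integers(len(prob)))         # pick a bucket uniformly
    return i if rng.random() < prob[i] else alias[i]

rng = np.random.default_rng(0)
alias, prob = alias_setup([0.5, 0.3, 0.2])
print([alias_draw(alias, prob, rng) for _ in range(5)])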
NotBlizzard/blizzybot
bot.py
41a6f07e4d3bb97772b07aa9d6a3af935b78fb9a
# bot.py # TODO: # organize imports # organize from websocket import create_connection from threading import Thread from battle import Battle import commands import traceback import requests import inspect import json from fractions import Fraction import random import time import sys import re import os from learn import Learn class Bot: pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), "./data/pokedex.json"), "r").read()) pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), "./data/pokemon_teams.json"), "r").read()) def __init__(self, username, password, server, admins, rooms, symbol, avatar, plugins, log): self.start_time = float(time.time()) self.commands = [] self.last_message = {} self.i = 0 self.url = "http://play.pokemonshowdown.com/action.php" self.room = "" self.username = username self.password = password self.joined_all_rooms = False self.avatar = avatar self.server = server self.admins = admins self.rooms = rooms self.symbol = symbol self.battles = [] self.plugins = plugins self.rooms_joined = [] self.log = log self.tiers = ["randombattle", "ou", "ubers", "uu", "ru", "nu", "pu", "lc", "anythinggoes", "battlespotsingles"] def __str__(self): return "<Bot:{}>".format(self.username) def join(self, room): self.ws.send("|/join {}".format(room)) def current_battle(self): return [i for i in self.battles if i.room == self.room][0] def battle(self, message): message[1] = re.sub(r'[^A-z0-9]', '', message[1]) if message[1] == "turn" or message[1] == "start": getattr(self.current_battle()[self.room], "decide")() else: getattr(self.current_battle()[self.room], message[1])(message) def plugin(self, room, plugin, message): self.ws.send("{}|{}".format(room, plugin.run(message, self.last_message[self.room]))) def command(self, message, room, user): cmd = message[4].split(self.symbol)[1].split(" ")[0] try: if " " in message[4]: args = message[4].split("{} ".format(cmd))[1] else: args = [] command = getattr(commands, "command_{}".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self) self.ws.send("{}|{}".format(room, command)) except (IndexError, TypeError): print(traceback.print_exc()) self.ws.send("{}|Luffy: so it's a mystery command! 
(\"{}\" is not recognized)".format(room, cmd)) except: print(traceback.print_exc()) self.ws.send("{}|Something went wrong.".format(room)) def login(self, message): key = message[2] challenge = message[3] if self.password == "": data = { "act": "getassertion", "userid": self.username, "challengekeyid": key, "challenge": challenge } data = requests.get(self.url, data=data) self.ws.send("|/trn {},0,{}".format(self.username, data.text)) else: data = { "act": "login", "name": self.username, "pass": self.password, "challengekeyid": key, "challenge": challenge } data = requests.post(self.url, data=data) data = json.loads(data.text.split("]")[1]) self.ws.send("|/trn {},0,{}".format(self.username, data["assertion"])) def disconnect(self): self.ws = None sys.exit() def start(self): try: self.connect() except SystemExit: return sys.exit() def message(self, messages): timestamp = int(messages[2]) user = messages[3] print(self.room) print(self.rooms_joined) match_line = [x for x in self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)] if len(match_line) > 0 and self.room in self.rooms_joined: plugin = [x for x in self.plugins if x == match_line[0]][0] if self.room == "lobby": self.room = "" self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start()) if self.room in self.rooms_joined and messages[4][0] == self.symbol: if self.room == "lobby": self.room = "" self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start()) def battle_message(self, messages): user = re.sub(r'[^A-z0-9]', '', messages[2]) if messages[3][0] == self.symbol: messages = [""] + messages # now the list has five elements. self.commands.append(Thread(target=self.command, args=(messages, self.room, " " + user)).start()) def raw(self, messages): if self.rooms[self.i] not in self.rooms_joined and "infobox" in messages[2]: if self.rooms[self.i] == "lobby": self.rooms[self.i] = "" self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms) > self.i + 1: self.i += 1 def update(self): [self.join(room) for room in self.rooms] def request(self, messages): data = [x for x in self.battles if self.room in str(x)] battle_tier = re.search("battle-(.+)-(\d+)", self.room).group(1) if len(data) == 0: # new battle self.battles.append(Battle(battle_tier, self.room, self)) print("NEW BATTLE") self.battles[-1].run(messages) else: pass def update_battle(self, messages): data = json.loads(messages[2]) if len(data["challengesFrom"].keys()) > 0: who = list(data["challengesFrom"].keys())[0] tier = data["challengesFrom"][who] if tier in self.tiers: if "random" not in tier: team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send("|/utm {}".format(team)) self.ws.send("|/accept {}".format(who)) def connect(self): self.ws = create_connection("ws://{}/showdown/websocket".format(self.server)) while True: messages = [x for x in self.ws.recv().split("\n")] for message in messages: print("it is ") print(self.rooms_joined) if self.log: print(message.encode("utf-8", "ignore")) try: if ">" in self.last_message: self.room = message[1:] except: self.room = "" # lobby message = message.split("|") # battles if self.room in [x.room for x in self.battles] and len(message) > 1: battle = [i for i in self.battles if i.room == self.room][0] battle.run(message) if len(message) > 1: if message[1] == "c:": self.message(message) self.last_message[self.room] = message elif message[1] == "title": room = re.sub(r' ', '', message[2].lower()) 
self.rooms_joined.append(room) elif message[1] == "raw": self.raw(message) elif message[1] == "c": self.battle_message(message) elif message[1] == "challstr": self.login(message) elif message[1] == "updateuser": if not self.joined_all_rooms: for room in self.rooms: self.join(room) self.joined_all_rooms = True elif message[1] == "request": self.request(message) elif message[1] == "updatechallenges": self.update_battle(message) else: pass
[((59, 21, 59, 57), 're.sub', 're.sub', ({(59, 28, 59, 40): '"""[^A-z0-9]"""', (59, 42, 59, 44): '""""""', (59, 46, 59, 56): 'message[1]'}, {}), "('[^A-z0-9]', '', message[1])", False, 'import re\n'), ((101, 8, 101, 18), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((130, 15, 130, 52), 're.sub', 're.sub', ({(130, 22, 130, 34): '"""[^A-z0-9]"""', (130, 36, 130, 38): '""""""', (130, 40, 130, 51): 'messages[2]'}, {}), "('[^A-z0-9]', '', messages[2])", False, 'import re\n'), ((160, 15, 160, 38), 'json.loads', 'json.loads', ({(160, 26, 160, 37): 'messages[2]'}, {}), '(messages[2])', False, 'import json\n'), ((28, 32, 28, 43), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((91, 19, 91, 52), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((95, 19, 95, 53), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((107, 19, 107, 29), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((115, 49, 115, 105), 're.match', 're.match', (), '', False, 'import re\n'), ((151, 22, 151, 63), 're.search', 're.search', ({(151, 32, 151, 51): '"""battle-(.+)-(\\\\d+)"""', (151, 53, 151, 62): 'self.room'}, {}), "('battle-(.+)-(\\\\d+)', self.room)", False, 'import re\n'), ((153, 32, 153, 68), 'battle.Battle', 'Battle', ({(153, 39, 153, 50): 'battle_tier', (153, 52, 153, 61): 'self.room', (153, 63, 153, 67): 'self'}, {}), '(battle_tier, self.room, self)', False, 'from battle import Battle\n'), ((79, 18, 79, 39), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n'), ((82, 18, 82, 39), 'traceback.print_exc', 'traceback.print_exc', ({}, {}), '()', False, 'import traceback\n'), ((24, 43, 24, 68), 'os.path.dirname', 'os.path.dirname', ({(24, 59, 24, 67): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((25, 49, 25, 74), 'os.path.dirname', 'os.path.dirname', ({(25, 65, 25, 73): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((121, 33, 121, 95), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n'), ((127, 33, 127, 94), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n'), ((133, 33, 133, 100), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n')]
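bot.py above dispatches pipe-delimited protocol messages to methods looked up by name. A tiny standalone sketch of that getattr-based dispatch; the handler names and message format are illustrative assumptions, not the bot's real protocol handling:

# Sketch of the getattr-based message dispatch bot.py uses above.
class Dispatcher:
    def handle(self, raw: str):
        parts = raw.split("|")
        if len(parts) > 1:
            handler = getattr(self, "on_" + parts[1], None)
            if handler:
                handler(parts)

    def on_challstr(self, parts):
        print("would log in with key id", parts[2])


Dispatcher().handle("|challstr|4|deadbeef")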
Yao-14/stAnalysis
stRT/tdr/widgets/changes.py
d08483ce581f5b03cfcad8be500aaa64b0293f74
from typing import Optional, Tuple, Union import numpy as np import pandas as pd import pyvista as pv from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid try: from typing import Literal except ImportError: from typing_extensions import Literal from .ddrtree import DDRTree, cal_ncenter from .slice import euclidean_distance, three_d_slice #################################### # Changes along a vector direction # #################################### def changes_along_line( model: Union[PolyData, UnstructuredGrid], key: Union[str, list] = None, n_points: int = 100, vec: Union[tuple, list] = (1, 0, 0), center: Union[tuple, list] = None, ) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]: slices, line_points, line = three_d_slice( model=model, method="line", n_slices=n_points, vec=vec, center=center ) x, y = [], [] x_length = 0 for slice, (point_i, point) in zip(slices, enumerate(line_points)): change_value = np.asarray(slice[key]).sum() y.append(change_value) if point_i == 0: x.append(0) else: point1 = line_points[point_i - 1].points.flatten() point2 = line_points[point_i].points.flatten() ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3) x_length += ed x.append(x_length) return np.asarray(x), np.asarray(y), slices, line ################################# # Changes along the model shape # ################################# def changes_along_shape( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, key_added: Optional[str] = "rd_spatial", dim: int = 2, inplace: bool = False, **kwargs, ): model = model.copy() if not inplace else model X = model.points if spatial_key is None else model[spatial_key] DDRTree_kwargs = { "maxIter": 10, "sigma": 0.001, "gamma": 10, "eps": 0, "dim": dim, "Lambda": 5 * X.shape[1], "ncenter": cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs) Z, Y, stree, R, W, Q, C, objs = DDRTree(X, **DDRTree_kwargs) # Obtain the real part of the complex argument model[key_added] = np.real(W).astype(np.float64) return model if not inplace else None ############################## # Changes along the branches # ############################## def ElPiGraph_tree( X: np.ndarray, NumNodes: int = 50, **kwargs, ) -> Tuple[np.ndarray, np.ndarray]: """ Generate a principal elastic tree. Reference: Albergante et al. (2020), Robust and Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph. Args: X: DxN, data matrix list. NumNodes: The number of nodes of the principal graph. Use a range of 10 to 100 for ElPiGraph approach. **kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. For details, please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes: The nodes in the principal tree. edges: The edges between nodes in the principal tree. """ try: import elpigraph except ImportError: raise ImportError( "You need to install the package `elpigraph-python`." "\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`." ) ElPiGraph_kwargs = { "alpha": 0.01, "FinalEnergy": "Penalized", "StoreGraphEvolution": True, "GPU": False, } ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs["GPU"] is True: try: import cupy except ImportError: raise ImportError( "You need to install the package `cupy`." "\nInstall cupy via `pip install cupy-cuda113`." 
) elpi_tree = elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs ) nodes = elpi_tree[0]["NodePositions"] # ['AllNodePositions'][k] matrix_edges_weights = elpi_tree[0]["ElasticMatrix"] # ['AllElasticMatrices'][k] matrix_edges_weights = np.triu(matrix_edges_weights, 1) edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return nodes, edges def SimplePPT_tree( X: np.ndarray, NumNodes: int = 50, **kwargs, ) -> Tuple[np.ndarray, np.ndarray]: """ Generate a simple principal tree. Reference: Mao et al. (2015), SimplePPT: A simple principal tree algorithm, SIAM International Conference on Data Mining. Args: X: DxN, data matrix list. NumNodes: The number of nodes of the principal graph. Use a range of 100 to 2000 for PPT approach. **kwargs: Other parameters used in simpleppt.ppt. For details, please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes: The nodes in the principal tree. edges: The edges between nodes in the principal tree. """ try: import igraph import simpleppt except ImportError: raise ImportError( "You need to install the package `simpleppt` and `igraph`." "\nInstall simpleppt via `pip install -U simpleppt`." "\nInstall igraph via `pip install -U igraph`" ) SimplePPT_kwargs = { "seed": 1, "lam": 10, } SimplePPT_kwargs.update(kwargs) X = np.asarray(X) ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs) R = ppt_tree.R nodes = (np.dot(X.T, R) / R.sum(axis=0)).T B = ppt_tree.B edges = np.array( igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected").get_edgelist() ) return nodes, edges def map_points_to_branch( model: Union[PolyData, UnstructuredGrid], nodes: np.ndarray, spatial_key: Optional[str] = None, key_added: Optional[str] = "nodes", inplace: bool = False, **kwargs, ): """ Find the closest principal tree node to any point in the model through KDTree. Args: model: A reconstruct model. nodes: The nodes in the principal tree. spatial_key: The key that corresponds to the coordinates of the point in the model. If spatial_key is None, the coordinates are model.points. key_added: The key under which to add the nodes labels. inplace: Updates model in-place. kwargs: Other parameters used in scipy.spatial.KDTree. Returns: A model, which contains the following properties: `model.point_data[key_added]`, the nodes labels array. """ from scipy.spatial import KDTree model = model.copy() if not inplace else model X = model.points if spatial_key is None else model[spatial_key] nodes_kdtree = KDTree(np.asarray(nodes), **kwargs) _, ii = nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added] = ii return model if not inplace else None def map_gene_to_branch( model: Union[PolyData, UnstructuredGrid], tree: PolyData, key: Union[str, list], nodes_key: Optional[str] = "nodes", inplace: bool = False, ): """ Find the closest principal tree node to any point in the model through KDTree. Args: model: A reconstruct model contains the gene expression label. tree: A three-dims principal tree model contains the nodes label. key: The key that corresponds to the gene expression. nodes_key: The key that corresponds to the coordinates of the nodes in the tree. inplace: Updates tree model in-place. Returns: A tree, which contains the following properties: `tree.point_data[key]`, the gene expression array. 
""" model = model.copy() model_data = pd.DataFrame(model[nodes_key], columns=["nodes_id"]) key = [key] if isinstance(key, str) else key for sub_key in key: model_data[sub_key] = np.asarray(model[sub_key]) model_data = model_data.groupby(by="nodes_id").sum() model_data["nodes_id"] = model_data.index model_data.index = range(len(model_data.index)) tree = tree.copy() if not inplace else tree tree_data = pd.DataFrame(tree[nodes_key], columns=["nodes_id"]) tree_data = pd.merge(tree_data, model_data, how="outer", on="nodes_id") tree_data.fillna(value=0, inplace=True) for sub_key in key: tree.point_data[sub_key] = tree_data[sub_key].values return tree if not inplace else None def construct_tree_model( nodes: np.ndarray, edges: np.ndarray, key_added: Optional[str] = "nodes", ) -> PolyData: """ Construct a principal tree model. Args: nodes: The nodes in the principal tree. edges: The edges between nodes in the principal tree. key_added: The key under which to add the nodes labels. Returns: A three-dims principal tree model, which contains the following properties: `tree_model.point_data[key_added]`, the nodes labels array. """ padding = np.empty(edges.shape[0], int) * 2 padding[:] = 2 edges_w_padding = np.vstack((padding, edges.T)).T tree_model = pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added] = np.arange(0, len(nodes), 1) return tree_model def changes_along_branch( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, map_key: Union[str, list] = None, key_added: Optional[str] = "nodes", rd_method: Literal["ElPiGraph", "SimplePPT"] = "ElPiGraph", NumNodes: int = 50, inplace: bool = False, **kwargs, ) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]: model = model.copy() if not inplace else model X = model.points if spatial_key is None else model[spatial_key] if rd_method == "ElPiGraph": nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs) elif rd_method == "SimplePPT": nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs) else: raise ValueError( "`rd_method` value is wrong." "\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`." ) map_points_to_branch( model=model, nodes=nodes, spatial_key=spatial_key, key_added=key_added, inplace=True, ) tree_model = construct_tree_model(nodes=nodes, edges=edges) if not (map_key is None): map_gene_to_branch( model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True ) return model if not inplace else None, tree_model
[((140, 27, 140, 59), 'numpy.triu', 'np.triu', ({(140, 35, 140, 55): 'matrix_edges_weights', (140, 57, 140, 58): '1'}, {}), '(matrix_edges_weights, 1)', True, 'import numpy as np\n'), ((181, 8, 181, 21), 'numpy.asarray', 'np.asarray', ({(181, 19, 181, 20): 'X'}, {}), '(X)', True, 'import numpy as np\n'), ((182, 15, 182, 69), 'simpleppt.ppt', 'simpleppt.ppt', (), '', False, 'import simpleppt\n'), ((254, 17, 254, 69), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((264, 16, 264, 67), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((265, 16, 265, 75), 'pandas.merge', 'pd.merge', (), '', True, 'import pandas as pd\n'), ((294, 17, 294, 52), 'pyvista.PolyData', 'pv.PolyData', ({(294, 29, 294, 34): 'nodes', (294, 36, 294, 51): 'edges_w_padding'}, {}), '(nodes, edges_w_padding)', True, 'import pyvista as pv\n'), ((49, 11, 49, 24), 'numpy.asarray', 'np.asarray', ({(49, 22, 49, 23): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((49, 26, 49, 39), 'numpy.asarray', 'np.asarray', ({(49, 37, 49, 38): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((224, 26, 224, 43), 'numpy.asarray', 'np.asarray', ({(224, 37, 224, 42): 'nodes'}, {}), '(nodes)', True, 'import numpy as np\n'), ((225, 31, 225, 44), 'numpy.asarray', 'np.asarray', ({(225, 42, 225, 43): 'X'}, {}), '(X)', True, 'import numpy as np\n'), ((257, 30, 257, 56), 'numpy.asarray', 'np.asarray', ({(257, 41, 257, 55): 'model[sub_key]'}, {}), '(model[sub_key])', True, 'import numpy as np\n'), ((291, 14, 291, 43), 'numpy.empty', 'np.empty', ({(291, 23, 291, 37): 'edges.shape[0]', (291, 39, 291, 42): 'int'}, {}), '(edges.shape[0], int)', True, 'import numpy as np\n'), ((293, 22, 293, 51), 'numpy.vstack', 'np.vstack', ({(293, 32, 293, 50): '(padding, edges.T)'}, {}), '((padding, edges.T))', True, 'import numpy as np\n'), ((81, 23, 81, 33), 'numpy.real', 'np.real', ({(81, 31, 81, 32): 'W'}, {}), '(W)', True, 'import numpy as np\n'), ((135, 10, 135, 23), 'numpy.asarray', 'np.asarray', ({(135, 21, 135, 22): 'X'}, {}), '(X)', True, 'import numpy as np\n'), ((185, 13, 185, 27), 'numpy.dot', 'np.dot', ({(185, 20, 185, 23): 'X.T', (185, 25, 185, 26): 'R'}, {}), '(X.T, R)', True, 'import numpy as np\n'), ((35, 23, 35, 45), 'numpy.asarray', 'np.asarray', ({(35, 34, 35, 44): 'slice[key]'}, {}), '(slice[key])', True, 'import numpy as np\n'), ((141, 21, 141, 53), 'numpy.nonzero', 'np.nonzero', ({(141, 32, 141, 52): 'matrix_edges_weights'}, {}), '(matrix_edges_weights)', True, 'import numpy as np\n')]
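changes.py above assigns every model point to its nearest principal-tree node with a KDTree (map_points_to_branch). The same lookup shown standalone, with toy coordinates assumed here for illustration:

# Standalone version of the nearest-node lookup performed by map_points_to_branch above.
import numpy as np
from scipy.spatial import KDTree

nodes = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])    # toy tree nodes
points = np.random.default_rng(0).random((5, 3))        # toy model points

_, labels = KDTree(nodes).query(points, k=1)            # closest node index per point
print(labels)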
nkoshkina/Python_Training3
test/test_add_group.py
e917440d37883dbcaa527a0700bcfa1478a1c1ce
# -*- coding: utf-8 -*- from model.group import Group import pytest import allure_pytest def test_add_group(app, db, check_ui, json_groups): group0 = json_groups #with pytest.allure.step("Given a group list"): old_groups = db.get_group_list() #with pytest.allure.step("When I add a group %s to the list" % group0): app.group.create(group0) #assert app.group.count() == len(old_groups) + 1 #with pytest.allure.step("When the new groups list is equal old list with added group"): new_groups = db.get_group_list() old_groups.append(group0) assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max) if check_ui: print("CHECK_UI") assert sorted(new_groups, key=Group.id_or_max) == \ sorted(app.group.get_groups_list(), key=Group.id_or_max)
[]
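test_add_group.py above verifies the add by comparing the old list plus the new item against the freshly fetched list, order-insensitively. The idiom in isolation, with plain integers standing in for Group objects:

# Order-insensitive add check, as used by test_add_group above (ints stand in for Groups).
def assert_added(old, new, item, key=lambda x: x):
    assert sorted(old + [item], key=key) == sorted(new, key=key)


assert_added(old=[3, 1], new=[1, 3, 2], item=2)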
testinggg-art/Cyberbrain
cyberbrain/frame_tree.py
e38c74c174e23aa386d005b03f09b30aa1b3a0ae
from __future__ import annotations from .frame import Frame from .generated.communication_pb2 import CursorPosition class FrameTree: """A tree to store all frames. For now it's a fake implementation. Each node in the tree represents a frame that ever exists during program execution. Caller and callee frames are connected. Call order is preserved among callee frames of the same caller frame. Nodes are also indexed by frames' physical location (file name, line range). TODO: - Add indexes. - Implement frame search. """ # Keyed by frame ID. frames: dict[str, Frame] = dict() @classmethod def add_frame(cls, frame_id, frame: Frame): cls.frames[frame_id] = frame print(frame_id, frame) @classmethod def find_frames(cls, position: CursorPosition) -> list[Frame]: """ Right now it's a fake implementation, where we return the only existing frame. """ assert cls.frames return [next(iter(cls.frames.values()))] @classmethod def get_frame(cls, frame_id) -> Frame: assert cls.frames return cls.frames[frame_id]
[]
moggers87/django-otp-yubikey
src/otp_yubikey/models.py
2d7cf9dc91ba57b65aa62254532997cc1e6261dd
from __future__ import absolute_import, division, print_function, unicode_literals from base64 import b64decode from binascii import hexlify, unhexlify from struct import pack import six from django.db import models from django.utils.encoding import force_text from django_otp.models import Device from django_otp.util import hex_validator, random_hex from yubiotp.client import YubiClient10, YubiClient11, YubiClient20 from yubiotp.modhex import modhex from yubiotp.otp import decode_otp def default_id(): return force_text(random_hex(6)) def id_validator(value): return hex_validator(6)(value) def default_key(): return force_text(random_hex(16)) def key_validator(value): return hex_validator(16)(value) class YubikeyDevice(Device): """ Represents a locally-verified YubiKey OTP :class:`~django_otp.models.Device`. .. attribute:: private_id *CharField*: The 6-byte private ID (hex-encoded). .. attribute:: key *CharField*: The 16-byte AES key shared with this YubiKey (hex-encoded). .. attribute:: session *PositiveIntegerField*: The non-volatile session counter most recently used by this device. .. attribute:: counter *PositiveIntegerField*: The volatile session usage counter most recently used by this device. """ private_id = models.CharField( max_length=12, validators=[id_validator], default=default_id, verbose_name="Private ID", help_text="The 6-byte private ID (hex-encoded)." ) key = models.CharField( max_length=32, validators=[key_validator], default=default_key, help_text="The 16-byte AES key shared with this YubiKey (hex-encoded)." ) session = models.PositiveIntegerField( default=0, help_text="The non-volatile session counter most recently used by this device." ) counter = models.PositiveIntegerField( default=0, help_text="The volatile session usage counter most recently used by this device." ) class Meta(Device.Meta): verbose_name = "Local YubiKey device" def public_id(self): """ The public ID of this device is the four-byte, big-endian, modhex-encoded primary key. """ return modhex(pack('>I', self.id)) public_id.short_description = 'Public Identity' public_id.admin_order_field = 'id' @property def bin_key(self): return unhexlify(self.key.encode()) def verify_token(self, token): if isinstance(token, six.text_type): token = token.encode('utf-8') try: public_id, otp = decode_otp(token, self.bin_key) except Exception: return False if public_id != self.public_id(): return False if hexlify(otp.uid) != self.private_id.encode(): return False if otp.session < self.session: return False if (otp.session == self.session) and (otp.counter <= self.counter): return False # All tests pass. Update the counters and return the good news. self.session = otp.session self.counter = otp.counter self.save() return True class ValidationService(models.Model): """ Represents a YubiKey validation web service. By default, this will point to Yubico's official hosted service, which you can customize. You can also create instances to point at any other service implementing the same protocol. .. attribute:: name *CharField*: The name of this validation service. .. attribute:: api_id *IntegerField*: Your API ID. The server needs this to sign responsees. (Default: 1) .. attribute:: api_key *CharField*: Your base64-encoded API key, used to sign requests. This is optional but strongly recommended. (Default: ``''``) .. attribute:: base_url *URLField*: The base URL of the verification service. Defaults to Yubico's hosted API. .. attribute:: api_version *CharField*: The version of the validation API to use: '1.0', '1.1', or '2.0'. 
(Default: '2.0') .. attribute:: use_ssl *BooleanField*: If ``True``, we'll use the HTTPS versions of the default URLs. Because :mod:`urllib2` does not verify certificates, this provides little benefit. (Default: ``False``). .. attribute:: param_sl *CharField*: The level of syncing required. See :class:`~yubiotp.client.YubiClient20`. .. attribute:: param_timeout *CharField*: The time to allow for syncing. See :class:`~yubiotp.client.YubiClient20`. """ API_VERSIONS = ['1.0', '1.1', '2.0'] name = models.CharField( max_length=32, help_text="The name of this validation service." ) api_id = models.IntegerField( default=1, verbose_name="API ID", help_text="Your API ID." ) api_key = models.CharField( max_length=64, blank=True, default='', verbose_name="API key", help_text="Your base64-encoded API key." ) base_url = models.URLField( blank=True, default='', verbose_name="Base URL", help_text="The base URL of the verification service. Defaults to Yubico's hosted API." ) api_version = models.CharField( max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0', help_text="The version of the validation api to use." ) use_ssl = models.BooleanField( default=False, verbose_name="Use SSL", help_text="Use HTTPS API URLs by default?" ) param_sl = models.CharField( max_length=16, blank=True, default=None, verbose_name="SL", help_text="The level of syncing required." ) param_timeout = models.CharField( max_length=16, blank=True, default=None, verbose_name="Timeout", help_text="The time to allow for syncing." ) class Meta(object): verbose_name = "YubiKey validation service" def __unicode__(self): return self.name def get_client(self): api_key = b64decode(self.api_key.encode()) or None if self.api_version == '2.0': client = YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or None, self.param_timeout or None) elif self.api_version == '1.1': client = YubiClient11(self.api_id, api_key, self.use_ssl) else: client = YubiClient10(self.api_id, api_key, self.use_ssl) if self.base_url: client.base_url = self.base_url return client class RemoteYubikeyDevice(Device): """ Represents a YubiKey device that is to be verified with a remote validation service. In order create these devices, you must have at least one :class:`~otp_yubikey.models.ValidationService` in the database. .. attribute:: service *ForeignKey*: The validation service to use for this device. .. attribute:: public_id *CharField*: The public identity of the YubiKey (modhex-encoded). """ service = models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id = models.CharField(max_length=32, verbose_name="Public ID", help_text="The public identity of the YubiKey (modhex-encoded).") class Meta(Device.Meta): verbose_name = "Remote YubiKey device" def verify_token(self, token): verified = False if token[:-32] == self.public_id: client = self.service.get_client() response = client.verify(token) verified = response.is_ok() return verified
[((59, 17, 65, 5), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((67, 10, 72, 5), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((74, 14, 77, 5), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import models\n'), ((79, 14, 82, 5), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import models\n'), ((178, 11, 181, 5), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((183, 13, 187, 5), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((189, 14, 195, 5), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((197, 15, 202, 5), 'django.db.models.URLField', 'models.URLField', (), '', False, 'from django.db import models\n'), ((211, 14, 215, 5), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import models\n'), ((217, 15, 223, 5), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((225, 20, 231, 5), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((269, 14, 269, 76), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((270, 16, 270, 139), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((20, 22, 20, 35), 'django_otp.util.random_hex', 'random_hex', ({(20, 33, 20, 34): '(6)'}, {}), '(6)', False, 'from django_otp.util import hex_validator, random_hex\n'), ((24, 11, 24, 27), 'django_otp.util.hex_validator', 'hex_validator', ({(24, 25, 24, 26): '(6)'}, {}), '(6)', False, 'from django_otp.util import hex_validator, random_hex\n'), ((28, 22, 28, 36), 'django_otp.util.random_hex', 'random_hex', ({(28, 33, 28, 35): '(16)'}, {}), '(16)', False, 'from django_otp.util import hex_validator, random_hex\n'), ((32, 11, 32, 28), 'django_otp.util.hex_validator', 'hex_validator', ({(32, 25, 32, 27): '(16)'}, {}), '(16)', False, 'from django_otp.util import hex_validator, random_hex\n'), ((92, 22, 92, 41), 'struct.pack', 'pack', ({(92, 27, 92, 31): '""">I"""', (92, 33, 92, 40): 'self.id'}, {}), "('>I', self.id)", False, 'from struct import pack\n'), ((105, 29, 105, 60), 'yubiotp.otp.decode_otp', 'decode_otp', ({(105, 40, 105, 45): 'token', (105, 47, 105, 59): 'self.bin_key'}, {}), '(token, self.bin_key)', False, 'from yubiotp.otp import decode_otp\n'), ((112, 11, 112, 27), 'binascii.hexlify', 'hexlify', ({(112, 19, 112, 26): 'otp.uid'}, {}), '(otp.uid)', False, 'from binascii import hexlify, unhexlify\n'), ((243, 21, 243, 127), 'yubiotp.client.YubiClient20', 'YubiClient20', ({(243, 34, 243, 45): 'self.api_id', (243, 47, 243, 54): 'api_key', (243, 56, 243, 68): 'self.use_ssl', (243, 70, 243, 75): 'False', (243, 77, 243, 98): 'self.param_sl or None', (243, 100, 243, 126): 'self.param_timeout or None'}, {}), '(self.api_id, api_key, self.use_ssl, False, self.param_sl or\n None, self.param_timeout or None)', False, 'from yubiotp.client import YubiClient10, YubiClient11, YubiClient20\n'), ((245, 21, 245, 69), 'yubiotp.client.YubiClient11', 'YubiClient11', ({(245, 34, 245, 45): 'self.api_id', (245, 47, 245, 54): 'api_key', (245, 56, 245, 68): 'self.use_ssl'}, {}), '(self.api_id, api_key, self.use_ssl)', False, 'from 
yubiotp.client import YubiClient10, YubiClient11, YubiClient20\n'), ((247, 21, 247, 69), 'yubiotp.client.YubiClient10', 'YubiClient10', ({(247, 34, 247, 45): 'self.api_id', (247, 47, 247, 54): 'api_key', (247, 56, 247, 68): 'self.use_ssl'}, {}), '(self.api_id, api_key, self.use_ssl)', False, 'from yubiotp.client import YubiClient10, YubiClient11, YubiClient20\n')]
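The verify_token method in the otp_yubikey models above packs the whole replay-protection scheme into a few lines: decrypt the OTP with the device's AES key, check the private ID, then require the non-volatile session counter and the per-session usage counter to move forward. Below is a minimal sketch of that same check on plain values, outside the Django model. It reuses only the decode_otp/hexlify/unhexlify calls the file itself imports, omits the modhex public-ID comparison for brevity, and the check_token name and stored_* parameter names are illustrative, not part of the package.

from binascii import hexlify, unhexlify

from yubiotp.otp import decode_otp


def check_token(token, key_hex, stored_private_id, stored_session, stored_counter):
    # Mirror of YubikeyDevice.verify_token, but stateless: the caller supplies
    # the hex-encoded AES key, the hex-encoded private ID and the last-seen counters.
    if isinstance(token, str):
        token = token.encode('utf-8')
    try:
        public_id, otp = decode_otp(token, unhexlify(key_hex.encode()))
    except Exception:
        return False  # not a decryptable OTP for this key
    if hexlify(otp.uid) != stored_private_id.encode():
        return False  # private ID belongs to a different YubiKey
    if otp.session < stored_session:
        return False  # non-volatile session counter went backwards (replay)
    if (otp.session == stored_session) and (otp.counter <= stored_counter):
        return False  # usage counter did not advance within the same session
    return True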
gavinIRL/RHBot
v1/hsvfilter.py
1e22ae5ca7b67ebd6a72c23d9f46d5a8eb6e99cf
import typing


# custom data structure to hold the state of an HSV filter
class HsvFilter:
    def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None,
                 sAdd=None, sSub=None, vAdd=None, vSub=None):
        self.hMin = hMin
        self.sMin = sMin
        self.vMin = vMin
        self.hMax = hMax
        self.sMax = sMax
        self.vMax = vMax
        self.sAdd = sAdd
        self.sSub = sSub
        self.vAdd = vAdd
        self.vSub = vSub


# Putting this here out of the way as it's a chonk
# For a given item string case it will return the optimal filter and the correct position to look
def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]:
    if object_name is None:
        #print("Using default filter")
        return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [3, 32, 1280, 794]
    if object_name == "dungeon_check":
        return HsvFilter(0, 73, 94, 106, 255, 255, 0, 0, 0, 0), [1083, 295, 1188, 368]
    if object_name == "enemy_map_loc":
        #print("Using enemy location filter")
        if kwargs.get("big_map"):
            return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
        return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
    if object_name == "player_map_loc":
        if kwargs.get("big_map"):
            return HsvFilter(31, 94, 86, 73, 255, 255, 0, 0, 0, 0), [485, 280, 900, 734]
        return HsvFilter(31, 94, 86, 73, 255, 255, 0, 0, 0, 0), [1100, 50, 1260, 210]
    if object_name == "other_player_map_loc":
        if kwargs.get("big_map"):
            return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0, 70, 37), [485, 280, 900, 734]
        return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0, 70, 37), [1100, 50, 1260, 210]
    if object_name == "loot_distant":
        return HsvFilter(14, 116, 33, 32, 210, 59, 16, 0, 3, 0), [10, 145, 1084, 684]
    if object_name == "loot_near":
        return HsvFilter(0, 155, 135, 31, 240, 217, 0, 0, 0, 0), [460, 420, 855, 710]
    if object_name == "prompt_press_x_pickup":
        return HsvFilter(78, 110, 110, 97, 189, 255, 0, 0, 0, 0), [1080, 660, 1255, 725]
    if object_name == "message_section_cleared":
        return HsvFilter(0, 0, 214, 179, 65, 255, 0, 0, 0, 17), [464, 600, 855, 680]
    if object_name == "message_go":
        return HsvFilter(32, 114, 89, 58, 255, 255, 0, 12, 0, 0), [600, 222, 700, 275]
    if object_name == "enemy_nametag":
        return HsvFilter(49, 0, 139, 91, 30, 197, 0, 0, 40, 38), [10, 145, 1084, 684]
    if object_name == "message_boss_encounter":
        return HsvFilter(0, 92, 128, 13, 255, 255, 0, 0, 0, 0), [630, 520, 1120, 680]
    if object_name == "display_boss_name_and_healthbar":
        return HsvFilter(0, 92, 123, 29, 255, 255, 0, 0, 0, 20), [415, 533, 888, 700]
    if object_name == "loot_chest_normal":
        # This is a difficult one to separate
        return HsvFilter(0, 34, 38, 28, 152, 124, 0, 0, 5, 12), [10, 145, 1084, 684]
    if object_name == "map_outline":
        if kwargs.get("big_map"):
            return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
        return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
    if object_name == "gate_map_pos":
        # This is a very difficult one to separate
        if kwargs.get("big_map"):
            return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [485, 280, 900, 734]
        return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]
    if object_name == "prompt_move_reward_screen":
        return HsvFilter(72, 98, 92, 105, 255, 225, 0, 54, 24, 38)
    if object_name == "prompt_select_card":
        return HsvFilter(79, 149, 140, 255, 255, 255, 0, 0, 0, 0)
    if object_name == "event_chest_special_appear":
        return HsvFilter(0, 124, 62, 88, 217, 246, 0, 0, 0, 0)
    if object_name == "inventory_green_item":
        return HsvFilter(37, 147, 0, 61, 255, 255, 0, 0, 0, 0)
    if object_name == "inventory_blue_item":
        return HsvFilter(79, 169, 0, 109, 246, 188, 0, 0, 0, 0)
    if object_name == "inventory_yellow_item":
        # This is a dangerous one as it can barely
        # distinguish against green items and vice versa
        return HsvFilter(19, 91, 107, 31, 168, 181, 0, 11, 32, 21)
    if object_name == "inventory_purple_item":
        return HsvFilter(126, 153, 0, 255, 255, 255, 0, 0, 0, 0)
    if object_name == "button_repair":
        return None, [208, 600]
    # These are all To be done later
    if object_name == "event_card_trade":
        return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0)
    if object_name == "event_otherworld":
        return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0)
    if object_name == "loot_chest_special":
        if kwargs.get("big_map"):
            return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
        return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
    if object_name == "cards":
        return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [735, 32, 1085, 100]
    if object_name == "enemy_arrow":
        return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]
    # Buttons for clicking, known positions
    if object_name == "button_explore_again":
        return None, []
    if object_name == "button_choose_map":
        return None, []
    if object_name == "button_open_store":
        return None, []
    if object_name == "button_go_town":
        return None, []
    if object_name == "button_inv_equipment":
        return None, []
    if object_name == "button_inv_consume":
        return None, []
    if object_name == "button_inv_other":
        return None, []
    if object_name == "button_repair_confirm":
        return None, []
    if object_name == "inv_grid_location":
        return None, [533+44*kwargs.get("col"), 277+44*kwargs.get("row")]
[]
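grab_object_preset above pairs an HSV threshold preset with the screen region in which the object is expected. The short consumption sketch below shows how a caller might use that pair; it assumes an OpenCV/numpy pipeline (cv2 is not imported by hsvfilter.py itself), assumes the four region numbers are left/top/right/bottom pixel coordinates, and every name other than grab_object_preset and the HsvFilter fields is invented for the illustration.

import cv2
import numpy as np

from hsvfilter import grab_object_preset  # assumes the module above is importable as `hsvfilter`


def mask_for(object_name, screenshot_bgr):
    # Look only where the object is expected, then threshold in HSV space.
    hsv_filter, region = grab_object_preset(object_name=object_name)
    left, top, right, bottom = region
    cropped = screenshot_bgr[top:bottom, left:right]
    hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
    lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
    upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
    return cv2.inRange(hsv, lower, upper)  # white pixels = colours inside the preset's range


# e.g. mask = mask_for("loot_near", frame), for presets that return both a filter and a region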
HPLegion/glue
glue/core/tests/test_state_objects.py
1843787ccb4de852dfe103ff58473da13faccf5f
import numpy as np from numpy.testing import assert_allclose from echo import CallbackProperty, ListCallbackProperty from glue.core import Data, DataCollection from .test_state import clone from ..state_objects import (State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class SimpleTestState(State): a = CallbackProperty() b = CallbackProperty() flat = ListCallbackProperty() nested = ListCallbackProperty() def test_state_serialization(): state1 = SimpleTestState() state1.a = 2 state1.b = 'hello' state1.flat = [1, 3, 4] sub_state = SimpleTestState() sub_state.a = 3 sub_state.b = 'blah' sub_state.flat = [1, 2] sub_state.nested = [] state1.nested = [1, 3, sub_state] state2 = clone(state1) assert state2.a == 2 assert state2.b == 'hello' assert state2.flat == [1, 3, 4] assert state2.nested[0:2] == [1, 3] assert state2.nested[2].a == 3 assert state2.nested[2].b == 'blah' assert state2.nested[2].flat == [1, 2] assert state2.nested[2].nested == [] EXPECTED_STR = """ a: 2 b: hello flat: <CallbackList with 3 elements> nested: <CallbackList with 3 elements> """ EXPECTED_REPR = """ <SimpleTestState a: 2 b: hello flat: <CallbackList with 3 elements> nested: <CallbackList with 3 elements> > """ def test_state_str_repr(): state1 = SimpleTestState() state1.a = 2 state1.b = 'hello' state1.flat = [1, 3, 4] sub_state = SimpleTestState() state1.nested = [1, 3, sub_state] assert str(state1) == EXPECTED_STR.strip() assert repr(state1) == EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() log = CallbackProperty(False) scale = CallbackProperty(100) self.state = SimpleState() self.helper = StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower', upper='upper', percentile='scale', log='log') self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def test_minmax(self): assert self.helper.lower == -100 assert self.helper.upper == +100 def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.lower == 2 assert self.helper.upper == 3 self.helper.attribute = self.x_id assert self.helper.lower == -100 assert self.helper.upper == +100 def test_change_percentile(self): # Changing scale mode updates the limits self.helper.percentile = 99.5 assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper, +99.5) self.helper.percentile = 99 assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper, +99) self.helper.percentile = 90 assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) # When switching to custom, the last limits are retained self.helper.percentile = "Custom" assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) def test_percentile_cached(self): # Make sure that if we change scale and change attribute, the scale # modes are cached on a per-attribute basis. 
self.helper.percentile = 99.5 self.state.comp = self.y_id assert self.helper.percentile == 100 self.helper.percentile = 99 self.state.comp = self.x_id assert self.helper.percentile == 99.5 self.state.comp = self.y_id assert self.helper.percentile == 99 def test_flip_button(self): self.helper.flip_limits() assert self.helper.lower == +100 assert self.helper.upper == -100 # Make sure that values were re-cached when flipping self.state.comp = self.y_id assert self.helper.lower == 2 assert self.helper.upper == 3 self.state.comp = self.x_id assert self.helper.lower == +100 assert self.helper.upper == -100 def test_manual_edit(self): # Make sure that values are re-cached when edited manually self.helper.percentile = "Custom" self.state.lower = -122 self.state.upper = 234 self.helper.log = True assert self.helper.lower == -122 assert self.helper.upper == 234 assert self.helper.log self.state.comp = self.y_id assert self.helper.lower == 2 assert self.helper.upper == 3 assert not self.helper.log self.state.comp = self.x_id assert self.helper.lower == -122 assert self.helper.upper == 234 assert self.helper.log class TestStateAttributeSingleValueHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100, 30, 9999), y=np.linspace(2, 3, 9999), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() val = CallbackProperty() self.state = SimpleState() self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian, value='val') self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def test_value(self): assert self.helper.value == -35. def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.value == 2.5 self.helper.attribute = self.x_id assert self.helper.value == -35 def test_manual_edit(self): self.state.val = 42. 
assert self.helper.value == 42 self.state.comp = self.y_id assert self.helper.value == 2.5 self.state.comp = self.x_id assert self.helper.value == 42 class TestStateAttributeHistogramHelper(): def setup_method(self, method): self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3], y=['a', 'f', 'd', 'e', 'f', 'f', 'a'], label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() x_min = CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() self.state = SimpleState() self.helper = StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin') self.state.data = self.data def test_default_numerical(self): self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2 assert self.state.x_max == 7.2 assert self.state.n_bin == 15 def test_default_categorical(self): self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 4 def test_hitting_limits(self): # FIXME: here we modify the internal defaults rather than making a new # state helper, but this could be improved self.helper._default_n_bin = 4 self.helper._max_n_bin = 3 self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2 assert self.state.x_max == 7.2 assert self.state.n_bin == 4 self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 3 def test_caching(self): self.state.comp = self.data.id['x'] self.state.x_min = 2 self.state.x_max = 7 self.state.n_bin = 8 self.state.comp = self.data.id['y'] self.state.x_min = 1.5 self.state.x_max = 3.5 self.state.n_bin = 3 self.state.comp = self.data.id['x'] assert self.state.x_min == 2 assert self.state.x_max == 7 assert self.state.n_bin == 8 self.state.comp = self.data.id['y'] assert self.state.x_min == 1.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 3 def test_histogram_helper_common_n_bin(): data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'], z=[1.1, 2.3, 1.2], label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() x_min = CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty() state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin', common_n_bin='common') state.data = data state.comp = data.id['x'] state.n_bin = 9 state.comp = data.id['y'] assert state.n_bin == 3 state.comp = data.id['z'] assert state.n_bin == 15 state.n_bin = 12 state.common = True state.comp = data.id['x'] assert state.n_bin == 12 state.n_bin = 11 state.comp = data.id['y'] assert state.n_bin == 3 state.comp = data.id['z'] assert state.n_bin == 11 state.common = False state.n_bin = 13 state.comp = data.id['x'] assert state.n_bin == 11 def test_histogram_helper_common_n_bin_active(): # Make sure that common_n_bin works as expected if True from start data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'], z=[1.1, 2.3, 1.2], label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() x_min = CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty(True) state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin', common_n_bin='common') state.data = data state.comp = data.id['x'] state.n_bin = 9 state.comp = data.id['z'] 
assert state.n_bin == 9 state.n_bin = 12 state.common = True state.comp = data.id['x'] assert state.n_bin == 12 state.n_bin = 11 state.comp = data.id['y'] assert state.n_bin == 3 state.comp = data.id['z'] assert state.n_bin == 11 state.common = False state.n_bin = 13 state.comp = data.id['x'] assert state.n_bin == 11 def test_limits_helper_initial_values(): # Regression test for a bug that occurred if the limits cache was empty # but some attributes were set to values - in this case we don't want to # override the existing values. data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() state = SimpleState() state.lower = 1 state.upper = 2 state.comp = data.id['x'] helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper') assert helper.lower == 1 assert helper.upper == 2 class DatetimeState(State): a = CallbackProperty() def test_state_serialization_datetime64(): state1 = DatetimeState() state1.a = np.datetime64(100, 'D') state2 = clone(state1) assert state2.a == np.datetime64(100, 'D') def test_nan_inf_minmax(): data = Data(x=[3, 1, -2, np.inf, np.nan], label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() percentile = CallbackProperty() log = CallbackProperty() state = SimpleState() helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa lower='lower', upper='upper', percentile='percentile', log='log') state.data = data state.comp = data.id['x'] assert state.lower == -2 assert state.upper == +3 state.log = True assert state.lower == +1 assert state.upper == +3 state.log = False state.percentile = 99 assert_allclose(state.lower, -1.97) assert_allclose(state.upper, +2.98) def test_percentile_no_log(): # Regression test for a bug that caused a crash if the state class had a # percentile attribute but no log. data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() scale = CallbackProperty() state = SimpleState() state.comp = data.id['x'] state.lower = 2 state.upper = 4 helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper', percentile='scale') state.scale = 90
[((15, 8, 15, 26), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((16, 8, 16, 26), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((17, 11, 17, 33), 'echo.ListCallbackProperty', 'ListCallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((18, 13, 18, 35), 'echo.ListCallbackProperty', 'ListCallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((305, 11, 308, 34), 'glue.core.Data', 'Data', (), '', False, 'from glue.core import Data, DataCollection\n'), ((359, 11, 362, 34), 'glue.core.Data', 'Data', (), '', False, 'from glue.core import Data, DataCollection\n'), ((436, 8, 436, 26), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((442, 15, 442, 38), 'numpy.datetime64', 'np.datetime64', ({(442, 29, 442, 32): '100', (442, 34, 442, 37): '"""D"""'}, {}), "(100, 'D')", True, 'import numpy as np\n'), ((451, 11, 451, 64), 'glue.core.Data', 'Data', (), '', False, 'from glue.core import Data, DataCollection\n'), ((482, 4, 482, 39), 'numpy.testing.assert_allclose', 'assert_allclose', ({(482, 20, 482, 31): 'state.lower', (482, 33, 482, 38): '(-1.97)'}, {}), '(state.lower, -1.97)', False, 'from numpy.testing import assert_allclose\n'), ((483, 4, 483, 39), 'numpy.testing.assert_allclose', 'assert_allclose', ({(483, 20, 483, 31): 'state.upper', (483, 33, 483, 38): '(+2.98)'}, {}), '(state.upper, +2.98)', False, 'from numpy.testing import assert_allclose\n'), ((87, 31, 87, 58), 'glue.core.DataCollection', 'DataCollection', ({(87, 46, 87, 57): '[self.data]'}, {}), '([self.data])', False, 'from glue.core import Data, DataCollection\n'), ((125, 8, 125, 49), 'numpy.testing.assert_allclose', 'assert_allclose', ({(125, 24, 125, 41): 'self.helper.lower', (125, 43, 125, 48): '(-99.5)'}, {}), '(self.helper.lower, -99.5)', False, 'from numpy.testing import assert_allclose\n'), ((126, 8, 126, 49), 'numpy.testing.assert_allclose', 'assert_allclose', ({(126, 24, 126, 41): 'self.helper.upper', (126, 43, 126, 48): '(+99.5)'}, {}), '(self.helper.upper, +99.5)', False, 'from numpy.testing import assert_allclose\n'), ((128, 8, 128, 47), 'numpy.testing.assert_allclose', 'assert_allclose', ({(128, 24, 128, 41): 'self.helper.lower', (128, 43, 128, 46): '(-99)'}, {}), '(self.helper.lower, -99)', False, 'from numpy.testing import assert_allclose\n'), ((129, 8, 129, 47), 'numpy.testing.assert_allclose', 'assert_allclose', ({(129, 24, 129, 41): 'self.helper.upper', (129, 43, 129, 46): '(+99)'}, {}), '(self.helper.upper, +99)', False, 'from numpy.testing import assert_allclose\n'), ((131, 8, 131, 47), 'numpy.testing.assert_allclose', 'assert_allclose', ({(131, 24, 131, 41): 'self.helper.lower', (131, 43, 131, 46): '(-90)'}, {}), '(self.helper.lower, -90)', False, 'from numpy.testing import assert_allclose\n'), ((132, 8, 132, 47), 'numpy.testing.assert_allclose', 'assert_allclose', ({(132, 24, 132, 41): 'self.helper.upper', (132, 43, 132, 46): '(+90)'}, {}), '(self.helper.upper, +90)', False, 'from numpy.testing import assert_allclose\n'), ((136, 8, 136, 47), 'numpy.testing.assert_allclose', 'assert_allclose', ({(136, 24, 136, 41): 'self.helper.lower', (136, 43, 136, 46): '(-90)'}, {}), '(self.helper.lower, -90)', False, 'from numpy.testing import assert_allclose\n'), ((137, 8, 137, 47), 
'numpy.testing.assert_allclose', 'assert_allclose', ({(137, 24, 137, 41): 'self.helper.upper', (137, 43, 137, 46): '(+90)'}, {}), '(self.helper.upper, +90)', False, 'from numpy.testing import assert_allclose\n'), ((193, 31, 193, 58), 'glue.core.DataCollection', 'DataCollection', ({(193, 46, 193, 57): '[self.data]'}, {}), '([self.data])', False, 'from glue.core import Data, DataCollection\n'), ((235, 20, 236, 82), 'glue.core.Data', 'Data', (), '', False, 'from glue.core import Data, DataCollection\n'), ((238, 31, 238, 58), 'glue.core.DataCollection', 'DataCollection', ({(238, 46, 238, 57): '[self.data]'}, {}), '([self.data])', False, 'from glue.core import Data, DataCollection\n'), ((312, 16, 312, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((313, 15, 313, 33), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((314, 16, 314, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((315, 16, 315, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((316, 16, 316, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((317, 17, 317, 35), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((366, 16, 366, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((367, 15, 367, 33), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((368, 16, 368, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((369, 16, 369, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((370, 16, 370, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((371, 17, 371, 39), 'echo.CallbackProperty', 'CallbackProperty', ({(371, 34, 371, 38): 'True'}, {}), '(True)', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((418, 16, 418, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((419, 15, 419, 33), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((420, 16, 420, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((421, 16, 421, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((446, 23, 446, 46), 'numpy.datetime64', 'np.datetime64', ({(446, 37, 446, 40): '(100)', (446, 42, 446, 45): '"""D"""'}, {}), "(100, 'D')", True, 'import numpy as np\n'), ((455, 16, 455, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((456, 15, 456, 33), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from 
echo import CallbackProperty, ListCallbackProperty\n'), ((457, 16, 457, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((458, 16, 458, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((459, 21, 459, 39), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((460, 14, 460, 32), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((496, 16, 496, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((497, 15, 497, 33), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((498, 16, 498, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((499, 16, 499, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((500, 16, 500, 34), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((91, 20, 91, 38), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((92, 19, 92, 37), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((93, 20, 93, 38), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((94, 20, 94, 38), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((95, 18, 95, 41), 'echo.CallbackProperty', 'CallbackProperty', ({(95, 35, 95, 40): 'False'}, {}), '(False)', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((96, 20, 96, 41), 'echo.CallbackProperty', 'CallbackProperty', ({(96, 37, 96, 40): '100'}, {}), '(100)', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((197, 20, 197, 38), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((198, 19, 198, 37), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((199, 18, 199, 36), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((242, 20, 242, 38), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((243, 19, 243, 37), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((244, 20, 244, 38), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((245, 20, 245, 38), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, ListCallbackProperty\n'), ((246, 20, 246, 38), 'echo.CallbackProperty', 'CallbackProperty', ({}, {}), '()', False, 'from echo import CallbackProperty, 
ListCallbackProperty\n'), ((413, 18, 413, 47), 'numpy.linspace', 'np.linspace', ({(413, 30, 413, 34): '-100', (413, 36, 413, 39): '100', (413, 41, 413, 46): '10000'}, {}), '(-100, 100, 10000)', True, 'import numpy as np\n'), ((414, 18, 414, 42), 'numpy.linspace', 'np.linspace', ({(414, 30, 414, 31): '2', (414, 33, 414, 34): '3', (414, 36, 414, 41): '10000'}, {}), '(2, 3, 10000)', True, 'import numpy as np\n'), ((491, 18, 491, 47), 'numpy.linspace', 'np.linspace', ({(491, 30, 491, 34): '-100', (491, 36, 491, 39): '100', (491, 41, 491, 46): '10000'}, {}), '(-100, 100, 10000)', True, 'import numpy as np\n'), ((492, 18, 492, 42), 'numpy.linspace', 'np.linspace', ({(492, 30, 492, 31): '2', (492, 33, 492, 34): '3', (492, 36, 492, 41): '10000'}, {}), '(2, 3, 10000)', True, 'import numpy as np\n'), ((84, 27, 84, 56), 'numpy.linspace', 'np.linspace', ({(84, 39, 84, 43): '-100', (84, 45, 84, 48): '100', (84, 50, 84, 55): '10000'}, {}), '(-100, 100, 10000)', True, 'import numpy as np\n'), ((85, 27, 85, 51), 'numpy.linspace', 'np.linspace', ({(85, 39, 85, 40): '2', (85, 42, 85, 43): '3', (85, 45, 85, 50): '10000'}, {}), '(2, 3, 10000)', True, 'import numpy as np\n'), ((190, 27, 190, 54), 'numpy.linspace', 'np.linspace', ({(190, 39, 190, 43): '-100', (190, 45, 190, 47): '30', (190, 49, 190, 53): '9999'}, {}), '(-100, 30, 9999)', True, 'import numpy as np\n'), ((191, 27, 191, 50), 'numpy.linspace', 'np.linspace', ({(191, 39, 191, 40): '2', (191, 42, 191, 43): '3', (191, 45, 191, 49): '9999'}, {}), '(2, 3, 9999)', True, 'import numpy as np\n')]
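The glue test module above exercises one pattern repeatedly: a State subclass declares CallbackProperty fields, and a helper object (limits, single-value or histogram) keeps those fields synchronised with whichever data component is selected. A condensed, hedged sketch of that wiring outside the test harness follows; the class and variable names are invented, and the import path glue.core.state_objects is inferred from the tests' relative import.

import numpy as np
from echo import CallbackProperty
from glue.core import Data
from glue.core.state_objects import State, StateAttributeLimitsHelper


class LimitsDemoState(State):
    comp = CallbackProperty()   # which component is being shown
    lower = CallbackProperty()  # lower display limit, managed by the helper
    upper = CallbackProperty()  # upper display limit, managed by the helper


data = Data(x=np.linspace(-100, 100, 1000), label='demo')
state = LimitsDemoState()
helper = StateAttributeLimitsHelper(state, attribute='comp',
                                   lower='lower', upper='upper')
state.comp = data.id['x']
# After this assignment the tests above expect lower/upper to track the
# component's data range, i.e. roughly -100 and +100 here.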
victormartinez/ecommerceapi
ecommerce_api/core/cart/exceptions.py
a887d9e938050c15ebf52001f63d7aa7f33fa5ee
from typing import Iterable, Optional


class ProductsNotFound(Exception):
    def __init__(self, product_ids: Optional[Iterable[int]] = None):
        self.product_ids = product_ids or []
        self.message = "One or more products are invalid."
        super().__init__(self.message)
[]
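ProductsNotFound above carries both a message and the offending IDs, so callers can report exactly which products failed validation. A small hedged usage sketch follows; it is not part of the package, and the helper name and ID sets are invented.

def ensure_products_exist(requested_ids, known_ids):
    missing = [pid for pid in requested_ids if pid not in known_ids]
    if missing:
        raise ProductsNotFound(product_ids=missing)


try:
    ensure_products_exist([1, 2, 99], known_ids={1, 2, 3})
except ProductsNotFound as exc:
    print(exc.message, exc.product_ids)  # "One or more products are invalid." [99]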
jsoref/neo4j-python-driver
test/unit/test_record.py
32c130c9a975dbf8c0d345b362d096b5e1dd3e5b
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2002-2018 "Neo Technology," # Network Engine for Objects in Lund AB [http://neotechnology.com] # # This file is part of Neo4j. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import TestCase from neo4j.v1 import Record class RecordTestCase(TestCase): def test_record_equality(self): record1 = Record(["name", "empire"], ["Nigel", "The British Empire"]) record2 = Record(["name", "empire"], ["Nigel", "The British Empire"]) record3 = Record(["name", "empire"], ["Stefan", "Das Deutschland"]) assert record1 == record2 assert record1 != record3 assert record2 != record3 def test_record_hashing(self): record1 = Record(["name", "empire"], ["Nigel", "The British Empire"]) record2 = Record(["name", "empire"], ["Nigel", "The British Empire"]) record3 = Record(["name", "empire"], ["Stefan", "Das Deutschland"]) assert hash(record1) == hash(record2) assert hash(record1) != hash(record3) assert hash(record2) != hash(record3) def test_record_iter(self): a_record = Record(["name", "empire"], ["Nigel", "The British Empire"]) assert list(a_record.__iter__()) == ["name", "empire"] def test_record_copy(self): original = Record(["name", "empire"], ["Nigel", "The British Empire"]) duplicate = original.copy() assert dict(original) == dict(duplicate) assert original.keys() == duplicate.keys() assert original is not duplicate def test_record_as_dict(self): a_record = Record(["name", "empire"], ["Nigel", "The British Empire"]) assert dict(a_record) == {"name": "Nigel", "empire": "The British Empire"} def test_record_as_list(self): a_record = Record(["name", "empire"], ["Nigel", "The British Empire"]) assert list(a_record) == ["name", "empire"] def test_record_len(self): a_record = Record(["name", "empire"], ["Nigel", "The British Empire"]) assert len(a_record) == 2 def test_record_repr(self): a_record = Record(["name", "empire"], ["Nigel", "The British Empire"]) assert repr(a_record) == "<Record name='Nigel' empire='The British Empire'>" def test_record_data(self): r = Record(["name", "age", "married"], ["Alice", 33, True]) self.assertEqual(r.data(), {"name": "Alice", "age": 33, "married": True}) self.assertEqual(r.data("name"), {"name": "Alice"}) self.assertEqual(r.data("age", "name"), {"age": 33, "name": "Alice"}) self.assertEqual(r.data("age", "name", "shoe size"), {"age": 33, "name": "Alice", "shoe size": None}) self.assertEqual(r.data(0, "name"), {"name": "Alice"}) self.assertEqual(r.data(0), {"name": "Alice"}) self.assertEqual(r.data(1, 0), {"age": 33, "name": "Alice"}) with self.assertRaises(IndexError): _ = r.data(1, 0, 999) def test_record_keys(self): r = Record(["name", "age", "married"], ["Alice", 33, True]) self.assertEqual(r.keys(), ("name", "age", "married")) def test_record_values(self): r = Record(["name", "age", "married"], ["Alice", 33, True]) self.assertEqual(r.values(), ("Alice", 33, True)) self.assertEqual(r.values("name"), ("Alice",)) self.assertEqual(r.values("age", "name"), (33, "Alice")) self.assertEqual(r.values("age", "name", 
"shoe size"), (33, "Alice", None)) self.assertEqual(r.values(0, "name"), ("Alice", "Alice")) self.assertEqual(r.values(0), ("Alice",)) self.assertEqual(r.values(1, 0), (33, "Alice")) with self.assertRaises(IndexError): _ = r.values(1, 0, 999) def test_record_items(self): r = Record(["name", "age", "married"], ["Alice", 33, True]) self.assertEqual(r.items(), [("name", "Alice"), ("age", 33), ("married", True)]) self.assertEqual(r.items("name"), [("name", "Alice")]) self.assertEqual(r.items("age", "name"), [("age", 33), ("name", "Alice")]) self.assertEqual(r.items("age", "name", "shoe size"), [("age", 33), ("name", "Alice"), ("shoe size", None)]) self.assertEqual(r.items(0, "name"), [("name", "Alice"), ("name", "Alice")]) self.assertEqual(r.items(0), [("name", "Alice")]) self.assertEqual(r.items(1, 0), [("age", 33), ("name", "Alice")]) with self.assertRaises(IndexError): _ = r.items(1, 0, 999) def test_record_index(self): r = Record(["name", "age", "married"], ["Alice", 33, True]) self.assertEqual(r.index("name"), 0) self.assertEqual(r.index("age"), 1) self.assertEqual(r.index("married"), 2) with self.assertRaises(KeyError): _ = r.index("shoe size") self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2), 2) with self.assertRaises(IndexError): _ = r.index(3) with self.assertRaises(TypeError): _ = r.index(None) def test_record_value(self): r = Record(["name", "age", "married"], ["Alice", 33, True]) self.assertEqual(r.value(), "Alice") self.assertEqual(r.value("name"), "Alice") self.assertEqual(r.value("age"), 33) self.assertEqual(r.value("married"), True) self.assertEqual(r.value("shoe size"), None) self.assertEqual(r.value("shoe size", 6), 6) self.assertEqual(r.value(0), "Alice") self.assertEqual(r.value(1), 33) self.assertEqual(r.value(2), True) self.assertEqual(r.value(3), None) self.assertEqual(r.value(3, 6), 6) with self.assertRaises(TypeError): _ = r.value(None) def test_record_contains(self): r = Record(["name", "age", "married"], ["Alice", 33, True]) self.assertTrue("name" in r) self.assertTrue("age" in r) self.assertTrue("married" in r) self.assertFalse("shoe size" in r) self.assertTrue(0 in r) self.assertTrue(1 in r) self.assertTrue(2 in r) self.assertFalse(3 in r) with self.assertRaises(TypeError): _ = r.index(None)
[((30, 18, 30, 77), 'neo4j.v1.Record', 'Record', ({(30, 25, 30, 43): "['name', 'empire']", (30, 45, 30, 76): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((31, 18, 31, 77), 'neo4j.v1.Record', 'Record', ({(31, 25, 31, 43): "['name', 'empire']", (31, 45, 31, 76): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((32, 18, 32, 75), 'neo4j.v1.Record', 'Record', ({(32, 25, 32, 43): "['name', 'empire']", (32, 45, 32, 74): "['Stefan', 'Das Deutschland']"}, {}), "(['name', 'empire'], ['Stefan', 'Das Deutschland'])", False, 'from neo4j.v1 import Record\n'), ((38, 18, 38, 77), 'neo4j.v1.Record', 'Record', ({(38, 25, 38, 43): "['name', 'empire']", (38, 45, 38, 76): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((39, 18, 39, 77), 'neo4j.v1.Record', 'Record', ({(39, 25, 39, 43): "['name', 'empire']", (39, 45, 39, 76): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((40, 18, 40, 75), 'neo4j.v1.Record', 'Record', ({(40, 25, 40, 43): "['name', 'empire']", (40, 45, 40, 74): "['Stefan', 'Das Deutschland']"}, {}), "(['name', 'empire'], ['Stefan', 'Das Deutschland'])", False, 'from neo4j.v1 import Record\n'), ((46, 19, 46, 78), 'neo4j.v1.Record', 'Record', ({(46, 26, 46, 44): "['name', 'empire']", (46, 46, 46, 77): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((50, 19, 50, 78), 'neo4j.v1.Record', 'Record', ({(50, 26, 50, 44): "['name', 'empire']", (50, 46, 50, 77): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((57, 19, 57, 78), 'neo4j.v1.Record', 'Record', ({(57, 26, 57, 44): "['name', 'empire']", (57, 46, 57, 77): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((61, 19, 61, 78), 'neo4j.v1.Record', 'Record', ({(61, 26, 61, 44): "['name', 'empire']", (61, 46, 61, 77): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((65, 19, 65, 78), 'neo4j.v1.Record', 'Record', ({(65, 26, 65, 44): "['name', 'empire']", (65, 46, 65, 77): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((69, 19, 69, 78), 'neo4j.v1.Record', 'Record', ({(69, 26, 69, 44): "['name', 'empire']", (69, 46, 69, 77): "['Nigel', 'The British Empire']"}, {}), "(['name', 'empire'], ['Nigel', 'The British Empire'])", False, 'from neo4j.v1 import Record\n'), ((73, 12, 73, 67), 'neo4j.v1.Record', 'Record', ({(73, 19, 73, 45): "['name', 'age', 'married']", (73, 47, 73, 66): "['Alice', 33, True]"}, {}), "(['name', 'age', 'married'], ['Alice', 33, True])", False, 'from neo4j.v1 import Record\n'), ((85, 12, 85, 67), 'neo4j.v1.Record', 'Record', ({(85, 19, 85, 45): "['name', 'age', 'married']", (85, 47, 85, 66): "['Alice', 33, True]"}, {}), "(['name', 'age', 'married'], ['Alice', 33, True])", False, 'from neo4j.v1 import Record\n'), ((89, 12, 89, 67), 'neo4j.v1.Record', 'Record', ({(89, 19, 89, 45): "['name', 'age', 'married']", (89, 47, 89, 66): 
"['Alice', 33, True]"}, {}), "(['name', 'age', 'married'], ['Alice', 33, True])", False, 'from neo4j.v1 import Record\n'), ((101, 12, 101, 67), 'neo4j.v1.Record', 'Record', ({(101, 19, 101, 45): "['name', 'age', 'married']", (101, 47, 101, 66): "['Alice', 33, True]"}, {}), "(['name', 'age', 'married'], ['Alice', 33, True])", False, 'from neo4j.v1 import Record\n'), ((113, 12, 113, 67), 'neo4j.v1.Record', 'Record', ({(113, 19, 113, 45): "['name', 'age', 'married']", (113, 47, 113, 66): "['Alice', 33, True]"}, {}), "(['name', 'age', 'married'], ['Alice', 33, True])", False, 'from neo4j.v1 import Record\n'), ((128, 12, 128, 67), 'neo4j.v1.Record', 'Record', ({(128, 19, 128, 45): "['name', 'age', 'married']", (128, 47, 128, 66): "['Alice', 33, True]"}, {}), "(['name', 'age', 'married'], ['Alice', 33, True])", False, 'from neo4j.v1 import Record\n'), ((144, 12, 144, 67), 'neo4j.v1.Record', 'Record', ({(144, 19, 144, 45): "['name', 'age', 'married']", (144, 47, 144, 66): "['Alice', 33, True]"}, {}), "(['name', 'age', 'married'], ['Alice', 33, True])", False, 'from neo4j.v1 import Record\n')]
hugocool/explainerdashboard
tests/integration_tests/test_dashboards.py
e725528c3d94a1a45b51bd9632686d0697274f54
import dash from catboost import CatBoostClassifier, CatBoostRegressor from xgboost import XGBClassifier, XGBRegressor from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names from explainerdashboard.dashboards import ExplainerDashboard def get_classification_explainer(xgboost=False, include_y=True): X_train, y_train, X_test, y_test = titanic_survive() if xgboost: model = XGBClassifier().fit(X_train, y_train) else: model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train) if include_y: explainer = ClassifierExplainer( model, X_test, y_test, cats=['Sex', 'Deck', 'Embarked'], labels=['Not survived', 'Survived']) else: explainer = ClassifierExplainer( model, X_test, cats=['Sex', 'Deck', 'Embarked'], labels=['Not survived', 'Survived']) explainer.calculate_properties() return explainer def get_regression_explainer(xgboost=False, include_y=True): X_train, y_train, X_test, y_test = titanic_fare() train_names, test_names = titanic_names() if xgboost: model = XGBRegressor().fit(X_train, y_train) else: model = RandomForestRegressor(n_estimators=50, max_depth=10).fit(X_train, y_train) if include_y: reg_explainer = RegressionExplainer(model, X_test, y_test, cats=['Sex', 'Deck', 'Embarked'], idxs=test_names, units="$") else: reg_explainer = RegressionExplainer(model, X_test, cats=['Sex', 'Deck', 'Embarked'], idxs=test_names, units="$") reg_explainer.calculate_properties() return reg_explainer def get_multiclass_explainer(xgboost=False, include_y=True): X_train, y_train, X_test, y_test = titanic_embarked() train_names, test_names = titanic_names() if xgboost: model = XGBClassifier().fit(X_train, y_train) else: model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train) if include_y: if xgboost: multi_explainer = ClassifierExplainer(model, X_test, y_test, model_output='logodds', cats=['Sex', 'Deck'], labels=['Queenstown', 'Southampton', 'Cherbourg']) else: multi_explainer = ClassifierExplainer(model, X_test, y_test, cats=['Sex', 'Deck'], labels=['Queenstown', 'Southampton', 'Cherbourg']) else: if xgboost: multi_explainer = ClassifierExplainer(model, X_test, model_output='logodds', cats=['Sex', 'Deck'], labels=['Queenstown', 'Southampton', 'Cherbourg']) else: multi_explainer = ClassifierExplainer(model, X_test, cats=['Sex', 'Deck'], labels=['Queenstown', 'Southampton', 'Cherbourg']) multi_explainer.calculate_properties() return multi_explainer def get_catboost_classifier(): X_train, y_train, X_test, y_test = titanic_survive() train_names, test_names = titanic_names() model = CatBoostClassifier(iterations=100, verbose=0).fit(X_train, y_train) explainer = ClassifierExplainer( model, X_test, y_test, cats=[{'Gender': ['Sex_female', 'Sex_male', 'Sex_nan']}, 'Deck', 'Embarked'], labels=['Not survived', 'Survived'], idxs=test_names) X_cats, y_cats = explainer.X_merged, explainer.y.astype("int") model = CatBoostClassifier(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7]) explainer = ClassifierExplainer(model, X_cats, y_cats, idxs=X_test.index) explainer.calculate_properties(include_interactions=False) return explainer def get_catboost_regressor(): X_train, y_train, X_test, y_test = titanic_fare() model = CatBoostRegressor(iterations=5, verbose=0).fit(X_train, y_train) explainer = RegressionExplainer(model, X_test, y_test, cats=["Sex", 'Deck', 
'Embarked']) X_cats, y_cats = explainer.X_merged, explainer.y model = CatBoostRegressor(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7]) explainer = RegressionExplainer(model, X_cats, y_cats, idxs=X_test.index) explainer.calculate_properties(include_interactions=False) return explainer def test_classification_dashboard(dash_duo): explainer = get_classification_explainer() db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_regression_dashboard(dash_duo): explainer = get_regression_explainer() db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=20) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_simple_classification_dashboard(dash_duo): explainer = get_classification_explainer() db = ExplainerDashboard(explainer, title="testing", responsive=False, simple=True) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("#simple-classifier-composite-title", "testing", timeout=20) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_simple_regression_dashboard(dash_duo): explainer = get_regression_explainer() db = ExplainerDashboard(explainer, title="testing", responsive=False, simple=True) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("#simple-regression-composite-title", "testing", timeout=20) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_multiclass_dashboard(dash_duo): explainer = get_multiclass_explainer() db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_xgboost_classification_dashboard(dash_duo): explainer = get_classification_explainer(xgboost=True) db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_xgboost_regression_dashboard(dash_duo): explainer = get_regression_explainer(xgboost=True) db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_xgboost_multiclass_dashboard(dash_duo): explainer = get_multiclass_explainer(xgboost=True) db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_classification_dashboard_no_y(dash_duo): explainer = get_classification_explainer(include_y=False) db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_regression_dashboard_no_y(dash_duo): explainer = get_regression_explainer(include_y=False) db = ExplainerDashboard(explainer, title="testing", responsive=False) 
dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_multiclass_dashboard_no_y(dash_duo): explainer = get_multiclass_explainer(include_y=False) db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_catboost_classification_dashboard(dash_duo): explainer = get_catboost_classifier() db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error" def test_cat_boost_regression_dashboard(dash_duo): explainer = get_catboost_regressor() db = ExplainerDashboard(explainer, title="testing", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30) assert dash_duo.get_logs() == [], "browser console should contain no error"
[((15, 39, 15, 56), 'explainerdashboard.datasets.titanic_survive', 'titanic_survive', ({}, {}), '()', False, 'from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names\n'), ((36, 39, 36, 53), 'explainerdashboard.datasets.titanic_fare', 'titanic_fare', ({}, {}), '()', False, 'from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names\n'), ((37, 30, 37, 45), 'explainerdashboard.datasets.titanic_names', 'titanic_names', ({}, {}), '()', False, 'from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names\n'), ((58, 39, 58, 57), 'explainerdashboard.datasets.titanic_embarked', 'titanic_embarked', ({}, {}), '()', False, 'from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names\n'), ((59, 30, 59, 45), 'explainerdashboard.datasets.titanic_names', 'titanic_names', ({}, {}), '()', False, 'from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names\n'), ((91, 39, 91, 56), 'explainerdashboard.datasets.titanic_survive', 'titanic_survive', ({}, {}), '()', False, 'from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names\n'), ((92, 30, 92, 45), 'explainerdashboard.datasets.titanic_names', 'titanic_names', ({}, {}), '()', False, 'from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names\n'), ((95, 16, 100, 40), 'explainerdashboard.explainers.ClassifierExplainer', 'ClassifierExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((104, 16, 104, 77), 'explainerdashboard.explainers.ClassifierExplainer', 'ClassifierExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((110, 39, 110, 53), 'explainerdashboard.datasets.titanic_fare', 'titanic_fare', ({}, {}), '()', False, 'from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names\n'), ((113, 16, 114, 69), 'explainerdashboard.explainers.RegressionExplainer', 'RegressionExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((117, 16, 117, 77), 'explainerdashboard.explainers.RegressionExplainer', 'RegressionExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((124, 9, 124, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((132, 9, 132, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((139, 9, 139, 86), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((147, 9, 147, 86), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((155, 9, 155, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((163, 9, 163, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import 
ExplainerDashboard\n'), ((171, 9, 171, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((179, 9, 179, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((187, 9, 187, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((195, 9, 195, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((203, 9, 203, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((211, 9, 211, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((219, 9, 219, 73), 'explainerdashboard.dashboards.ExplainerDashboard', 'ExplainerDashboard', (), '', False, 'from explainerdashboard.dashboards import ExplainerDashboard\n'), ((21, 20, 24, 64), 'explainerdashboard.explainers.ClassifierExplainer', 'ClassifierExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((26, 20, 29, 64), 'explainerdashboard.explainers.ClassifierExplainer', 'ClassifierExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((44, 24, 47, 50), 'explainerdashboard.explainers.RegressionExplainer', 'RegressionExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((49, 24, 52, 50), 'explainerdashboard.explainers.RegressionExplainer', 'RegressionExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((67, 30, 70, 94), 'explainerdashboard.explainers.ClassifierExplainer', 'ClassifierExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((72, 30, 74, 94), 'explainerdashboard.explainers.ClassifierExplainer', 'ClassifierExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((77, 30, 80, 94), 'explainerdashboard.explainers.ClassifierExplainer', 'ClassifierExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((82, 30, 84, 94), 'explainerdashboard.explainers.ClassifierExplainer', 'ClassifierExplainer', (), '', False, 'from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer\n'), ((94, 12, 94, 57), 'catboost.CatBoostClassifier', 'CatBoostClassifier', (), '', False, 'from catboost import CatBoostClassifier, CatBoostRegressor\n'), ((103, 12, 103, 55), 'catboost.CatBoostClassifier', 'CatBoostClassifier', (), '', False, 'from catboost import CatBoostClassifier, CatBoostRegressor\n'), ((112, 12, 112, 54), 'catboost.CatBoostRegressor', 'CatBoostRegressor', (), '', False, 'from catboost import CatBoostClassifier, CatBoostRegressor\n'), ((116, 12, 116, 54), 'catboost.CatBoostRegressor', 'CatBoostRegressor', (), '', False, 'from catboost import CatBoostClassifier, CatBoostRegressor\n'), ((17, 16, 17, 31), 'xgboost.XGBClassifier', 'XGBClassifier', ({}, {}), '()', False, 'from 
xgboost import XGBClassifier, XGBRegressor\n'), ((19, 16, 19, 69), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (), '', False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((39, 16, 39, 30), 'xgboost.XGBRegressor', 'XGBRegressor', ({}, {}), '()', False, 'from xgboost import XGBClassifier, XGBRegressor\n'), ((41, 16, 41, 68), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', (), '', False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((61, 16, 61, 31), 'xgboost.XGBClassifier', 'XGBClassifier', ({}, {}), '()', False, 'from xgboost import XGBClassifier, XGBRegressor\n'), ((63, 16, 63, 69), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (), '', False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n')]
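For context, roughly how one of these dashboards would be served outside the dash_duo test harness. The explainer construction follows the fixture calls recorded in the API list above (the random-forest hyper-parameters are placeholders); ExplainerDashboard.run() and the port are assumed from the package's usual usage rather than shown in this file:

from sklearn.ensemble import RandomForestClassifier
from explainerdashboard.explainers import ClassifierExplainer
from explainerdashboard.dashboards import ExplainerDashboard
from explainerdashboard.datasets import titanic_survive

# Build a small classifier explainer the same way the test fixtures do.
X_train, y_train, X_test, y_test = titanic_survive()
model = RandomForestClassifier(n_estimators=50, max_depth=5).fit(X_train, y_train)
explainer = ClassifierExplainer(model, X_test, y_test)

# Serves the same Dash app that dash_duo.start_server(db.app) exercises in the tests.
ExplainerDashboard(explainer, title="testing", responsive=False).run(port=8050)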
dgrechka/bengaliai-cv19
code/scripts/GeneratePNG_Preview_AsIs.py
9ef15c5b140628337ae6efe0d76e7ec5d291dc17
import tensorflow as tf import sys import os from glob import glob import png sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest import tfDataSetParquet as tfDsParquet inputDataDir = sys.argv[1] outputDir = sys.argv[2] # test app if __name__ == "__main__": files = glob(os.path.join(inputDataDir,"train*.parquet")) print("Found {0} parquet files in input dir {1}".format(len(files),inputDataDir)) print("First is {0}".format(files[0])) ds = tfDsParquet.create_parquet_dataset([files[0]]) for element in ds.as_numpy_iterator(): #print("Iterating...") sampleId,pixels = element sampleId = sampleId.decode("utf-8") fileName = os.path.join(outputDir,"{0}.png".format(sampleId)) png.from_array(pixels, mode="L").save(fileName) #print(element) #print("sample name is {0}".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape) # a += 1 # if a > 10: # break print("Done") #print("{0} elements in the dataset".format(len(ds.)))
[((7, 16, 7, 48), 'os.path.join', 'os.path.join', ({(7, 29, 7, 37): '__file__', (7, 38, 7, 42): '""".."""', (7, 43, 7, 47): '""".."""'}, {}), "(__file__, '..', '..')", False, 'import os\n'), ((19, 9, 19, 55), 'tfDataIngest.tfDataSetParquet.create_parquet_dataset', 'tfDsParquet.create_parquet_dataset', ({(19, 44, 19, 54): '[files[0]]'}, {}), '([files[0]])', True, 'from tfDataIngest import tfDataSetParquet as tfDsParquet\n'), ((16, 17, 16, 60), 'os.path.join', 'os.path.join', ({(16, 30, 16, 42): 'inputDataDir', (16, 43, 16, 59): '"""train*.parquet"""'}, {}), "(inputDataDir, 'train*.parquet')", False, 'import os\n'), ((27, 8, 27, 40), 'png.from_array', 'png.from_array', (), '', False, 'import png\n')]
RSabet/wxGlade
widgets/datepicker_ctrl/codegen.py
8b62eb8397308e60977857455b2765727b1b940f
"""\ Code generator functions for wxDatePickerCtrl objects @copyright: 2002-2007 Alberto Griggio @copyright: 2014-2016 Carsten Grohmann @copyright: 2016-2021 Dietmar Schwertberger @license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY """ import common, compat import wcodegen class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\n' # XXX the following needs to depend on the code generator when Phoenix is about to be supported fully: if compat.IS_PHOENIX: import_modules = ['import wx.adv\n'] if compat.IS_PHOENIX: def cn(self, name): # don't process already formatted items again if name.startswith('wx.'): return name if name.startswith('wx'): return 'wx.adv.' + name[2:] elif name.startswith('EVT_'): return 'wx.adv.' + name return name def _prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules = ['<wx/datectrl.h>'] tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \ 'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \ '%(style)s);\n' prefix_style = False set_default_style = True def _prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return def xrc_code_generator(obj): xrcgen = common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self, name, val, output, tabs): if name == 'label': # translate & into _ as accelerator marker val2 = val.replace('&', '_') if val.count('&&') > 0: while True: index = val.find('&&') if index < 0: break val = val2[:index] + '&&' + val2[index+2:] else: val = val2 xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs) return DatePickerCtrlXrcObject(obj) def initialize(): klass = 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] = klass common.register('python', klass, PythonDatePickerCtrlGenerator(klass)) common.register('C++', klass, CppDatePickerCtrlGenerator(klass)) common.register('XRC', klass, xrc_code_generator)
[((81, 4, 81, 56), 'common.register', 'common.register', ({(81, 20, 81, 25): '"""XRC"""', (81, 30, 81, 35): 'klass', (81, 37, 81, 55): 'xrc_code_generator'}, {}), "('XRC', klass, xrc_code_generator)", False, 'import common, compat\n'), ((33, 8, 33, 72), 'wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content', 'wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content', ({(33, 62, 33, 66): 'self', (33, 68, 33, 71): 'obj'}, {}), '(self, obj)', False, 'import wcodegen\n'), ((49, 8, 49, 69), 'wcodegen.CppWidgetCodeWriter._prepare_tmpl_content', 'wcodegen.CppWidgetCodeWriter._prepare_tmpl_content', ({(49, 59, 49, 63): 'self', (49, 65, 49, 68): 'obj'}, {}), '(self, obj)', False, 'import wcodegen\n')]
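To make the template in the file above concrete, a standalone sketch of the substitution the Python code writer performs; the widget name, parent and style values are invented, and the class name shows what cn('wxDatePickerCtrl') yields under Phoenix:

# Illustration only: the real substitution values come from wxGlade's widget properties.
tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\n'
print(tmpl % {
    'name': 'self.datepicker_ctrl_1',
    'klass': 'wx.adv.DatePickerCtrl',   # cn('wxDatePickerCtrl') under Phoenix
    'parent': 'self',
    'id': 'wx.ID_ANY',
    'style': '',
})
# -> self.datepicker_ctrl_1 = wx.adv.DatePickerCtrl(self, wx.ID_ANY)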
lck1201/simple-effective-3Dpose-baseline
train.py
790a185b44e48a9cc619f52b6615aae729bff76b
import pprint import mxnet as mx from mxnet import gluon from mxnet import init from lib.core.get_optimizer import * from lib.core.metric import MPJPEMetric from lib.core.loss import MeanSquareLoss from lib.core.loader import JointsDataIter from lib.network import get_net from lib.net_module import * from lib.utils import * from lib.dataset.hm36 import hm36 from config import config, gen_config, update_config_from_args, s_args config = update_config_from_args(config, s_args) def main(): # Parse config and mkdir output logger, final_Model_path = create_logger(config) config.final_Model_path = final_Model_path gen_config(os.path.join(final_Model_path, 'hyperParams.yaml')) logger.info('Training config:{}\n'.format(pprint.pformat(config))) # define context if config.useGPU: ctx = [mx.gpu(int(i)) for i in config.gpu.split(',')] else: ctx = mx.cpu() logger.info("Using context:", ctx) # dataset, generate trainset/ validation set train_imdbs = [] valid_imdbs = [] for i in range(len(config.DATASET.train_image_set)): logger.info("Construct Dataset:", config.DATASET.dbname[i], ", Dataset Path:", config.DATASET.dataset_path[i]) train_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.train_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i])) valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i], config.final_Model_path)) data_names = ['hm36data'] label_names = ['hm36label'] train_data_iter = JointsDataIter(train_imdbs[0], runmode=0, data_names = data_names, label_names=label_names, shuffle=config.TRAIN.SHUFFLE, batch_size=len(ctx)*config.TRAIN.batchsize, logger=logger) valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1, data_names = data_names, label_names=label_names, shuffle=False, batch_size=len(ctx)*config.TEST.batchsize, logger=logger) assert train_data_iter.get_meanstd()['mean3d'].all() == valid_data_iter.get_meanstd()['mean3d'].all() # network net = get_net(config) if config.resume: ckp_path = os.path.join(config.resumeckp) net.collect_params().load(ckp_path, ctx=ctx) else: net.initialize(init=init.MSRAPrelu(), ctx=ctx) if config.NETWORK.hybrid: net.hybridize() logger.info(net) # define loss and metric mean3d = train_data_iter.get_meanstd()['mean3d'] std3d = train_data_iter.get_meanstd()['std3d'] train_metric = MPJPEMetric('train_metric', mean3d, std3d) eval_metric = MPJPEMetric('valid_metric', mean3d, std3d) loss = MeanSquareLoss() # optimizer optimizer, optimizer_params = get_optimizer(config, ctx) # train and valid TrainDBsize = train_data_iter.get_size() ValidDBsize = valid_data_iter.get_size() logger.info("Train DB size:", TrainDBsize, "Valid DB size:",ValidDBsize) if not isinstance(train_data_iter, mx.io.PrefetchingIter): train_data_iter = mx.io.PrefetchingIter(train_data_iter) trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params) for epoch in range(config.TRAIN.begin_epoch, config.TRAIN.end_epoch): trainNet(net, trainer, train_data_iter, loss, train_metric, epoch, config, logger=logger, ctx=ctx) validNet(net, valid_data_iter, loss, eval_metric, epoch, config, logger=logger, ctx=ctx) logger.kill() if __name__ == '__main__': main()
[((17, 9, 17, 48), 'config.update_config_from_args', 'update_config_from_args', ({(17, 33, 17, 39): 'config', (17, 41, 17, 47): 's_args'}, {}), '(config, s_args)', False, 'from config import config, gen_config, update_config_from_args, s_args\n'), ((57, 10, 57, 25), 'lib.network.get_net', 'get_net', ({(57, 18, 57, 24): 'config'}, {}), '(config)', False, 'from lib.network import get_net\n'), ((72, 19, 72, 61), 'lib.core.metric.MPJPEMetric', 'MPJPEMetric', ({(72, 31, 72, 45): '"""train_metric"""', (72, 47, 72, 53): 'mean3d', (72, 55, 72, 60): 'std3d'}, {}), "('train_metric', mean3d, std3d)", False, 'from lib.core.metric import MPJPEMetric\n'), ((73, 19, 73, 61), 'lib.core.metric.MPJPEMetric', 'MPJPEMetric', ({(73, 31, 73, 45): '"""valid_metric"""', (73, 47, 73, 53): 'mean3d', (73, 55, 73, 60): 'std3d'}, {}), "('valid_metric', mean3d, std3d)", False, 'from lib.core.metric import MPJPEMetric\n'), ((74, 19, 74, 35), 'lib.core.loss.MeanSquareLoss', 'MeanSquareLoss', ({}, {}), '()', False, 'from lib.core.loss import MeanSquareLoss\n'), ((30, 14, 30, 22), 'mxnet.cpu', 'mx.cpu', ({}, {}), '()', True, 'import mxnet as mx\n'), ((85, 26, 85, 64), 'mxnet.io.PrefetchingIter', 'mx.io.PrefetchingIter', ({(85, 48, 85, 63): 'train_data_iter'}, {}), '(train_data_iter)', True, 'import mxnet as mx\n'), ((24, 46, 24, 68), 'pprint.pformat', 'pprint.pformat', ({(24, 61, 24, 67): 'config'}, {}), '(config)', False, 'import pprint\n'), ((62, 28, 62, 44), 'mxnet.init.MSRAPrelu', 'init.MSRAPrelu', ({}, {}), '()', False, 'from mxnet import init\n')]
WangFeng18/dino
FastLinear/generate_memory_bank.py
1a4e49bd0e99d7e205338b14994a1d57c3084cfe
import os from tqdm import tqdm import torch.backends.cudnn as cudnn import torch from datasets import ImageNetInstance, ImageNetInstanceLMDB from torchvision import transforms import argparse from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network from torch.utils.data import DataLoader from PIL import ImageFile, Image import torch.distributed as dist from lars import * ImageFile.LOAD_TRUNCATED_IMAGES = True import warnings warnings.filterwarnings('ignore') def concat_all_gather(tensor): """ Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient. """ tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gather(tensors_gather, tensor, async_op=False) output = torch.cat(tensors_gather, dim=0) return output def main(): parser = argparse.ArgumentParser("The first stage of BoostrapSelfSup") parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed parallel') parser.add_argument("--task", type=str, default="moco", help="the pretraining models") parser.add_argument("--pretrained_path", type=str, default="", help="the pretraining models") parser.add_argument("--save_path", type=str, default="", help="where to save the memory_bank") parser.add_argument("--backbone", type=str, default="resnet50") parser.add_argument("--data_path", type=str, default="~/ILSVRC2012/", help="the data path") parser.add_argument("--batch_size", type=int, default=32, help="batch size") parser.add_argument("--img_size", type=int, default=224, help="image size") parser.add_argument("--feat_dim", type=int, default=128, help="feat dimension") parser.add_argument("--feature_layer", type=str, default='lowdim', help="feature layer") parser.add_argument('--use-lmdb', action='store_true') args = parser.parse_args() pretrained_path = os.path.expanduser(args.pretrained_path) save_path = os.path.expanduser(args.save_path) data_path = os.path.expanduser(args.data_path) batch_size = args.batch_size feat_dim = args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) # network = ResNet(50, frozen_stages=4) if args.task == 'moco': network = get_moco_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'swav': network = get_swav_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'selfboost': network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'minmaxent': network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'dino': network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'simclr': network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'sup': network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) else: raise NotImplementedError network.cuda(args.local_rank) network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank]) cudnn.benchmark = True augmentation = transforms.Compose([ transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC), transforms.CenterCrop(args.img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) if 
args.use_lmdb: train_dataset = ImageNetInstanceLMDB(root=data_path, list_file='train.lmdb', transform=augmentation) val_dataset = ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation) else: train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation) val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, rank=args.local_rank) n_train_points = len(train_dataset) n_val_points = len(val_dataset) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4) val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4) print("Initializing train memory bank: {} points.".format(n_train_points)) train_memory_bank = torch.zeros(n_train_points, feat_dim).to("cpu").detach() print("Initializing val memory bank: {} points.".format(n_val_points)) val_memory_bank = torch.zeros(n_val_points, feat_dim).to("cpu").detach() network.eval() train_sampler.set_epoch(0) val_sampler.set_epoch(0) for data in tqdm(train_dataloader): idx, img, _ = data idx = idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'): feature = network(img) else: feature = network.module.get_intermediate_layers(img, 4) feature = [x[:, 0] for x in feature] feature = torch.cat(feature, dim=-1) feature = concat_all_gather(feature.contiguous()) idx = concat_all_gather(idx) with torch.no_grad(): train_memory_bank[idx,:] = feature.detach().cpu() for data in tqdm(val_dataloader): idx, img, _ = data idx = idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'): feature = network(img) else: feature = network.module.get_intermediate_layers(img, 4) feature = [x[:, 0] for x in feature] feature = torch.cat(feature, dim=-1) feature = concat_all_gather(feature.contiguous()) idx = concat_all_gather(idx) with torch.no_grad(): val_memory_bank[idx,:] = feature.detach().cpu() if args.local_rank == 0: torch.save( {'train_memory_bank': train_memory_bank, 'val_memory_bank': val_memory_bank }, args.save_path ) if __name__ == '__main__': main()
[((15, 0, 15, 33), 'warnings.filterwarnings', 'warnings.filterwarnings', ({(15, 24, 15, 32): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((24, 4, 24, 72), 'torch.distributed.all_gather', 'torch.distributed.all_gather', (), '', False, 'import torch\n'), ((26, 13, 26, 45), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((30, 13, 30, 74), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({(30, 37, 30, 73): '"""The first stage of BoostrapSelfSup"""'}, {}), "('The first stage of BoostrapSelfSup')", False, 'import argparse\n'), ((44, 22, 44, 62), 'os.path.expanduser', 'os.path.expanduser', ({(44, 41, 44, 61): 'args.pretrained_path'}, {}), '(args.pretrained_path)', False, 'import os\n'), ((45, 16, 45, 50), 'os.path.expanduser', 'os.path.expanduser', ({(45, 35, 45, 49): 'args.save_path'}, {}), '(args.save_path)', False, 'import os\n'), ((46, 16, 46, 50), 'os.path.expanduser', 'os.path.expanduser', ({(46, 35, 46, 49): 'args.data_path'}, {}), '(args.data_path)', False, 'import os\n'), ((50, 4, 50, 43), 'torch.distributed.init_process_group', 'dist.init_process_group', (), '', True, 'import torch.distributed as dist\n'), ((51, 4, 51, 42), 'torch.cuda.set_device', 'torch.cuda.set_device', ({(51, 26, 51, 41): 'args.local_rank'}, {}), '(args.local_rank)', False, 'import torch\n'), ((72, 14, 72, 94), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (), '', False, 'import torch\n'), ((91, 20, 91, 119), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (), '', False, 'import torch\n'), ((92, 18, 92, 115), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (), '', False, 'import torch\n'), ((96, 23, 96, 126), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((97, 23, 97, 124), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((108, 16, 108, 38), 'tqdm.tqdm', 'tqdm', ({(108, 21, 108, 37): 'train_dataloader'}, {}), '(train_dataloader)', False, 'from tqdm import tqdm\n'), ((125, 16, 125, 36), 'tqdm.tqdm', 'tqdm', ({(125, 21, 125, 35): 'val_dataloader'}, {}), '(val_dataloader)', False, 'from tqdm import tqdm\n'), ((22, 22, 22, 45), 'torch.ones_like', 'torch.ones_like', ({(22, 38, 22, 44): 'tensor'}, {}), '(tensor)', False, 'import torch\n'), ((55, 18, 55, 85), 'BaseTaskModel.task_network.get_moco_network', 'get_moco_network', (), '', False, 'from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network\n'), ((85, 24, 85, 108), 'datasets.ImageNetInstanceLMDB', 'ImageNetInstanceLMDB', (), '', False, 'from datasets import ImageNetInstance, ImageNetInstanceLMDB\n'), ((86, 22, 86, 104), 'datasets.ImageNetInstanceLMDB', 'ImageNetInstanceLMDB', (), '', False, 'from datasets import ImageNetInstance, ImageNetInstanceLMDB\n'), ((143, 8, 148, 9), 'torch.save', 'torch.save', ({(144, 12, 146, 13): "{'train_memory_bank': train_memory_bank, 'val_memory_bank': val_memory_bank}", (147, 12, 147, 26): 'args.save_path'}, {}), "({'train_memory_bank': train_memory_bank, 'val_memory_bank':\n val_memory_bank}, args.save_path)", False, 'import torch\n'), ((57, 18, 57, 85), 'BaseTaskModel.task_network.get_swav_network', 'get_swav_network', (), '', False, 'from BaseTaskModel.task_network import get_moco_network, get_swav_network, 
get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network\n'), ((79, 12, 79, 48), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', ({(79, 34, 79, 47): 'args.img_size'}, {}), '(args.img_size)', False, 'from torchvision import transforms\n'), ((80, 12, 80, 33), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import transforms\n'), ((81, 12, 81, 87), 'torchvision.transforms.Normalize', 'transforms.Normalize', (), '', False, 'from torchvision import transforms\n'), ((117, 22, 117, 48), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((122, 13, 122, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((134, 22, 134, 48), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((139, 13, 139, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((23, 23, 23, 57), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ({}, {}), '()', False, 'import torch\n'), ((59, 18, 59, 90), 'BaseTaskModel.task_network.get_selfboost_network', 'get_selfboost_network', (), '', False, 'from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network\n'), ((88, 46, 88, 78), 'os.path.join', 'os.path.join', ({(88, 59, 88, 68): 'data_path', (88, 70, 88, 77): '"""train"""'}, {}), "(data_path, 'train')", False, 'import os\n'), ((89, 44, 89, 74), 'os.path.join', 'os.path.join', ({(89, 57, 89, 66): 'data_path', (89, 68, 89, 73): '"""val"""'}, {}), "(data_path, 'val')", False, 'import os\n'), ((61, 18, 61, 105), 'BaseTaskModel.task_network.get_minmaxent_network', 'get_minmaxent_network', (), '', False, 'from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network\n'), ((100, 24, 100, 61), 'torch.zeros', 'torch.zeros', ({(100, 36, 100, 50): 'n_train_points', (100, 52, 100, 60): 'feat_dim'}, {}), '(n_train_points, feat_dim)', False, 'import torch\n'), ((103, 22, 103, 57), 'torch.zeros', 'torch.zeros', ({(103, 34, 103, 46): 'n_val_points', (103, 48, 103, 56): 'feat_dim'}, {}), '(n_val_points, feat_dim)', False, 'import torch\n'), ((63, 18, 63, 100), 'BaseTaskModel.task_network.get_dino_network', 'get_dino_network', (), '', False, 'from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network\n'), ((65, 18, 65, 102), 'BaseTaskModel.task_network.get_simclr_network', 'get_simclr_network', (), '', False, 'from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network\n'), ((67, 18, 67, 99), 'BaseTaskModel.task_network.get_sup_network', 'get_sup_network', (), '', False, 'from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network\n')]
anuragtr/fabric8-analytics-rudra
tests/utils/test_mercator.py
13fb15539d195fcb89ced02b205d034ec0c18e00
import pytest from rudra.utils.mercator import SimpleMercator class TestSimpleMercator: pom_xml_content = """ <project> <dependencies> <dependency> <groupId>grp1.id</groupId> <artifactId>art1.id</artifactId> </dependency> <dependency> <groupId>grp2.id</groupId> <artifactId>art2.id</artifactId> </dependency> <dependency> <groupId>grp3.id</groupId> <artifactId>art3.id</artifactId> <scope>test</scope> </dependency> </dependencies> </project> """ def test_get_dependencies(self): client = SimpleMercator(self.pom_xml_content) deps = client.get_dependencies() assert len(deps) == 3 artifact_ids = [d.artifact_id for d in deps] assert not {'art1.id', 'art2.id', 'art3.id'}.difference(set(artifact_ids)) group_ids = [d.group_id for d in deps] assert not {'grp1.id', 'grp2.id', 'grp3.id'}.difference(set(group_ids)) scopes = [d.scope for d in deps] assert not {'compile', 'test'}.difference(set(scopes)) def test_get_dependencies_with_no_dependencies(self): client = SimpleMercator('<project></project>'.encode()) deps = client.get_dependencies() assert len(deps) == 0 def test_get_dependencies_with_no_content(self): with pytest.raises(ValueError, match='Empty Content .*'): SimpleMercator('') def test_find_data_corrupt_pom(self): content = """ </project> </project> <dependencyManagement> <dependencies> <dependency> <groupId>grp1.id</groupId> <artifactId>art1.id</artifactId> </dependency> </dependencies> </dependencyManagement> <dependencies> <dependency> <groupId>grp1.id</groupId> <artifactId>art1.id</artifactId> </dependency> </dependencies> </project> """ client = SimpleMercator(content) deps = client.get_dependencies() assert len(deps) == 1 artifact_ids = [d.artifact_id for d in deps] assert 'art1.id' in artifact_ids
[((29, 17, 29, 53), 'rudra.utils.mercator.SimpleMercator', 'SimpleMercator', ({(29, 32, 29, 52): 'self.pom_xml_content'}, {}), '(self.pom_xml_content)', False, 'from rudra.utils.mercator import SimpleMercator\n'), ((68, 17, 68, 40), 'rudra.utils.mercator.SimpleMercator', 'SimpleMercator', ({(68, 32, 68, 39): 'content'}, {}), '(content)', False, 'from rudra.utils.mercator import SimpleMercator\n'), ((45, 13, 45, 64), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((46, 12, 46, 30), 'rudra.utils.mercator.SimpleMercator', 'SimpleMercator', ({(46, 27, 46, 29): '""""""'}, {}), "('')", False, 'from rudra.utils.mercator import SimpleMercator\n')]
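For orientation, a small direct-usage sketch distilled from the assertions in the test file above; the group/artifact names here are invented, and the scope appears to default to 'compile' when no <scope> tag is present:

from rudra.utils.mercator import SimpleMercator

pom = """
<project>
  <dependencies>
    <dependency>
      <groupId>org.example</groupId>
      <artifactId>demo-lib</artifactId>
    </dependency>
  </dependencies>
</project>
"""

# Each dependency exposes group_id, artifact_id and scope, as the tests assert.
for dep in SimpleMercator(pom).get_dependencies():
    print(dep.group_id, dep.artifact_id, dep.scope)  # e.g. "org.example demo-lib compile"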
stjordanis/mljar-supervised
tests/checks/run_performance_tests.py
8c3f9d1ed527dfcfdaef91cf82e2779c5832e294
import os import sys import unittest from tests.tests_bin_class.test_performance import * if __name__ == "__main__": unittest.main()
[((8, 4, 8, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n')]
wookiee2187/vc3-login-pod
task/CheckAllocations.py
3c0f5490c094bf0b4587a743efac68d722ea5ee2
#!/usr/bin/env python

from vc3master.task import VC3Task

class CheckAllocations(VC3Task):
    '''
    Plugin to do consistency/sanity checks on Allocations.
    '''

    def runtask(self):
        '''
        Run one pass of the Allocation checks. The checks themselves are not
        implemented yet; for now this only logs that the task ran.
        '''
        self.log.info("Running task %s" % self.section)

[]
Captricity/airbrake-django
django_airbrake/utils/client.py
2ea126653883732a13f1a80c9e567b7076601620
import sys import traceback from django.conf import settings from django.urls import resolve from lxml import etree from six.moves.urllib.request import urlopen, Request class Client(object): API_URL = '%s://airbrake.io/notifier_api/v2/notices' ERRORS = { 403: "Cannot use SSL", 422: "Invalid XML sent to Airbrake", 500: "Airbrake has braked too hard", } DEFAULTS = { 'TIMEOUT': 5, 'USE_SSL': False, } @property def url(self): scheme = 'http' if self.settings['USE_SSL']: scheme = 'https' return Client.API_URL % scheme @property def settings(self): if getattr(self, '_settings', None): return self._settings self._settings = Client.DEFAULTS self._settings.update(getattr(settings, 'AIRBRAKE', {})) return self._settings def notify(self, exception=None, request=None): headers = { 'Content-Type': 'text/xml' } payload = self._generate_xml(exception=exception, request=request) req = Request(self.url, payload, headers) resp = urlopen(req, timeout=self.settings['TIMEOUT']) status = resp.getcode() if status == 200: return True elif status in Client.ERRORS: raise Exception(Client.ERRORS[status]) def _generate_xml(self, exception=None, request=None): _, _, trace = sys.exc_info() notice_em = etree.Element('notice', version='2.0') tb = traceback.extract_tb(trace) api_key = etree.SubElement(notice_em, 'api-key').text = self.settings['API_KEY'] notifier_em = etree.SubElement(notice_em, 'notifier') etree.SubElement(notifier_em, 'name').text = 'django-airbrake' etree.SubElement(notifier_em, 'version').text = '0.0.4' url_el = etree.SubElement(notifier_em, 'url') url_el.text = 'http://example.com' if request: request_em = etree.SubElement(notice_em, 'request') if request.is_secure(): scheme = 'https' else: scheme = 'http' url = '%s://%s%s' % (scheme, request.get_host(), request.get_full_path()) etree.SubElement(request_em, 'url').text = str(url) url_el.text = url cb, _, _ = resolve(request.path) etree.SubElement(request_em, 'component').text = str(cb.__module__) etree.SubElement(request_em, 'action').text = str(cb.__name__) if 'context' in self.settings: cgi_em = etree.SubElement(request_em, 'cgi-data') for key, val in list(self.settings['context'].items()): var = etree.SubElement(cgi_em, 'var') var.set('key', str(key)) var.text = str(val) session = list(request.session.items()) if len(session): session_em = etree.SubElement(request_em, 'session') for key, val in session: var = etree.SubElement(session_em, 'var') var.set('key', str(key)) var.text = str(val) if exception: error_em = etree.SubElement(notice_em, 'error') etree.SubElement(error_em, 'class').text = str(exception.__class__.__name__) etree.SubElement(error_em, 'message').text = str(exception) backtrace_em = etree.SubElement(error_em, 'backtrace') for line in tb: etree.SubElement(backtrace_em, 'line', file=str(line[0]), number=str(line[1]), method=str(line[2])) env_em = etree.SubElement(notice_em, 'server-environment') etree.SubElement(env_em, 'environment-name').text = self.settings.get('ENVIRONMENT', 'development') return '<?xml version="1.0" encoding="UTF-8"?>%s' % etree.tostring(notice_em)
[((45, 14, 45, 49), 'six.moves.urllib.request.Request', 'Request', ({(45, 22, 45, 30): 'self.url', (45, 32, 45, 39): 'payload', (45, 41, 45, 48): 'headers'}, {}), '(self.url, payload, headers)', False, 'from six.moves.urllib.request import urlopen, Request\n'), ((46, 15, 46, 61), 'six.moves.urllib.request.urlopen', 'urlopen', (), '', False, 'from six.moves.urllib.request import urlopen, Request\n'), ((55, 22, 55, 36), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n'), ((56, 20, 56, 58), 'lxml.etree.Element', 'etree.Element', (), '', False, 'from lxml import etree\n'), ((58, 13, 58, 40), 'traceback.extract_tb', 'traceback.extract_tb', ({(58, 34, 58, 39): 'trace'}, {}), '(trace)', False, 'import traceback\n'), ((62, 22, 62, 61), 'lxml.etree.SubElement', 'etree.SubElement', ({(62, 39, 62, 48): 'notice_em', (62, 50, 62, 60): '"""notifier"""'}, {}), "(notice_em, 'notifier')", False, 'from lxml import etree\n'), ((66, 17, 66, 53), 'lxml.etree.SubElement', 'etree.SubElement', ({(66, 34, 66, 45): 'notifier_em', (66, 47, 66, 52): '"""url"""'}, {}), "(notifier_em, 'url')", False, 'from lxml import etree\n'), ((113, 17, 113, 66), 'lxml.etree.SubElement', 'etree.SubElement', ({(113, 34, 113, 43): 'notice_em', (113, 45, 113, 65): '"""server-environment"""'}, {}), "(notice_em, 'server-environment')", False, 'from lxml import etree\n'), ((60, 18, 60, 56), 'lxml.etree.SubElement', 'etree.SubElement', ({(60, 35, 60, 44): 'notice_em', (60, 46, 60, 55): '"""api-key"""'}, {}), "(notice_em, 'api-key')", False, 'from lxml import etree\n'), ((64, 8, 64, 45), 'lxml.etree.SubElement', 'etree.SubElement', ({(64, 25, 64, 36): 'notifier_em', (64, 38, 64, 44): '"""name"""'}, {}), "(notifier_em, 'name')", False, 'from lxml import etree\n'), ((65, 8, 65, 48), 'lxml.etree.SubElement', 'etree.SubElement', ({(65, 25, 65, 36): 'notifier_em', (65, 38, 65, 47): '"""version"""'}, {}), "(notifier_em, 'version')", False, 'from lxml import etree\n'), ((70, 25, 70, 63), 'lxml.etree.SubElement', 'etree.SubElement', ({(70, 42, 70, 51): 'notice_em', (70, 53, 70, 62): '"""request"""'}, {}), "(notice_em, 'request')", False, 'from lxml import etree\n'), ((81, 23, 81, 44), 'django.urls.resolve', 'resolve', ({(81, 31, 81, 43): 'request.path'}, {}), '(request.path)', False, 'from django.urls import resolve\n'), ((115, 8, 115, 52), 'lxml.etree.SubElement', 'etree.SubElement', ({(115, 25, 115, 31): 'env_em', (115, 33, 115, 51): '"""environment-name"""'}, {}), "(env_em, 'environment-name')", False, 'from lxml import etree\n'), ((117, 60, 117, 85), 'lxml.etree.tostring', 'etree.tostring', ({(117, 75, 117, 84): 'notice_em'}, {}), '(notice_em)', False, 'from lxml import etree\n'), ((78, 12, 78, 47), 'lxml.etree.SubElement', 'etree.SubElement', ({(78, 29, 78, 39): 'request_em', (78, 41, 78, 46): '"""url"""'}, {}), "(request_em, 'url')", False, 'from lxml import etree\n'), ((82, 12, 82, 53), 'lxml.etree.SubElement', 'etree.SubElement', ({(82, 29, 82, 39): 'request_em', (82, 41, 82, 52): '"""component"""'}, {}), "(request_em, 'component')", False, 'from lxml import etree\n'), ((83, 12, 83, 50), 'lxml.etree.SubElement', 'etree.SubElement', ({(83, 29, 83, 39): 'request_em', (83, 41, 83, 49): '"""action"""'}, {}), "(request_em, 'action')", False, 'from lxml import etree\n'), ((85, 25, 85, 65), 'lxml.etree.SubElement', 'etree.SubElement', ({(85, 42, 85, 52): 'request_em', (85, 54, 85, 64): '"""cgi-data"""'}, {}), "(request_em, 'cgi-data')", False, 'from lxml import etree\n'), ((93, 29, 93, 68), 'lxml.etree.SubElement', 
'etree.SubElement', ({(93, 46, 93, 56): 'request_em', (93, 58, 93, 67): '"""session"""'}, {}), "(request_em, 'session')", False, 'from lxml import etree\n'), ((100, 27, 100, 63), 'lxml.etree.SubElement', 'etree.SubElement', ({(100, 44, 100, 53): 'notice_em', (100, 55, 100, 62): '"""error"""'}, {}), "(notice_em, 'error')", False, 'from lxml import etree\n'), ((105, 31, 105, 70), 'lxml.etree.SubElement', 'etree.SubElement', ({(105, 48, 105, 56): 'error_em', (105, 58, 105, 69): '"""backtrace"""'}, {}), "(error_em, 'backtrace')", False, 'from lxml import etree\n'), ((87, 26, 87, 57), 'lxml.etree.SubElement', 'etree.SubElement', ({(87, 43, 87, 49): 'cgi_em', (87, 51, 87, 56): '"""var"""'}, {}), "(cgi_em, 'var')", False, 'from lxml import etree\n'), ((95, 26, 95, 61), 'lxml.etree.SubElement', 'etree.SubElement', ({(95, 43, 95, 53): 'session_em', (95, 55, 95, 60): '"""var"""'}, {}), "(session_em, 'var')", False, 'from lxml import etree\n'), ((102, 16, 102, 51), 'lxml.etree.SubElement', 'etree.SubElement', ({(102, 33, 102, 41): 'error_em', (102, 43, 102, 50): '"""class"""'}, {}), "(error_em, 'class')", False, 'from lxml import etree\n'), ((103, 16, 103, 53), 'lxml.etree.SubElement', 'etree.SubElement', ({(103, 33, 103, 41): 'error_em', (103, 43, 103, 52): '"""message"""'}, {}), "(error_em, 'message')", False, 'from lxml import etree\n')]
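The Client above only needs an AIRBRAKE dict in Django settings (API_KEY is required; ENVIRONMENT, USE_SSL and TIMEOUT have defaults) plus a call to notify(exception, request). A sketch of wiring it into an exception-reporting middleware follows; the middleware class and the settings values are illustrative and not part of this package:

# settings.py (values are placeholders)
AIRBRAKE = {
    'API_KEY': 'your-airbrake-api-key',   # required: Client reads settings['API_KEY']
    'ENVIRONMENT': 'production',          # optional: defaults to 'development'
    'USE_SSL': True,                      # optional: defaults to False
    'TIMEOUT': 5,                         # optional: request timeout in seconds
}

# middleware.py (hypothetical)
from django_airbrake.utils.client import Client


class AirbrakeNotifierMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        self.client = Client()

    def __call__(self, request):
        return self.get_response(request)

    def process_exception(self, request, exception):
        # Report to Airbrake, then return None so Django's normal error handling continues.
        self.client.notify(exception=exception, request=request)
        return None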
jean1042/plugin-azure-cloud-services
src/spaceone/inventory/connector/snapshot.py
3a75a516c9a4d1e8a4962988934ead3fd40e8494
import logging from spaceone.inventory.libs.connector import AzureConnector from spaceone.inventory.error import * from spaceone.inventory.error.custom import * __all__ = ['SnapshotConnector'] _LOGGER = logging.getLogger(__name__) class SnapshotConnector(AzureConnector): def __init__(self, **kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def list_snapshots(self): try: return self.compute_client.snapshots.list() except ConnectionError: _LOGGER.error(ERROR_CONNECTOR(field='Public IP Address'))
[((7, 10, 7, 37), 'logging.getLogger', 'logging.getLogger', ({(7, 28, 7, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n')]
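As a rough usage sketch only: the connector above defers to AzureConnector.set_connect, and the exact secret_data keys are an assumption (typical Azure service-principal fields), not something this file defines:

# Hypothetical credential payload; the real keys depend on AzureConnector.set_connect.
secret_data = {
    "client_id": "...",
    "client_secret": "...",
    "tenant_id": "...",
    "subscription_id": "...",
}

connector = SnapshotConnector(secret_data=secret_data)
for snapshot in connector.list_snapshots():  # wraps compute_client.snapshots.list()
    print(snapshot.name)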
theasylum/wired
docs/tutorial/context/app.py
6b6a3e83702b18ebb41ca1f94e957bdf7e44986d
""" A customer walks into a store. Do the steps to interact with them: - Get *a* (not *the*) greeter - Interact with them Simple wired application: - Settings that say what punctuation to use - Registry - Two factories that says hello, one for the FrenchCustomer context - A default Customer and FrenchCustomer """ from dataclasses import dataclass from wired import ServiceRegistry @dataclass class Customer: name: str @dataclass class FrenchCustomer(Customer): pass @dataclass class Settings: punctuation: str @dataclass class Greeter: punctuation: str greeting: str = 'Hello' def __call__(self, customer: Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}' @dataclass class FrenchGreeter(Greeter): greeting: str = 'Bonjour' def __call__(self, customer: Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}' def setup(settings: Settings) -> ServiceRegistry: # Make the registry registry = ServiceRegistry() # Make the greeter factories, using punctuation from settings punctuation = settings.punctuation # First the default greeter, no context def default_greeter_factory(container) -> Greeter: # Use the dataclass default for greeting return Greeter(punctuation=punctuation) # Register it as a factory using its class for the "key" registry.register_factory(default_greeter_factory, Greeter) # Now the French greeter, using context of FrenchCustomer def french_greeter_factory(container) -> Greeter: # Use the dataclass default for greeting return FrenchGreeter(punctuation=punctuation) # Register it as a factory using its class for the "key", but # this time register with a "context" registry.register_factory( french_greeter_factory, Greeter, context=FrenchCustomer ) return registry def greet_customer(registry: ServiceRegistry, customer: Customer) -> str: # A customer comes in, handle the steps in the greeting # as a container. container = registry.create_container() # Get a Greeter using the customer as context. Use the Customer when # generating the greeting. greeter: Greeter = container.get(Greeter, context=customer) greeting = greeter(customer) return greeting def main(): settings = Settings(punctuation='!!') registry = setup(settings) # *** Default Customer # Make a Customer, pass into the "greet_customer" interaction, # then test the result. customer = Customer(name='Mary') assert 'Hello Mary !!' == greet_customer(registry, customer) # *** French Customer # Make a FrenchCustomer, pass into the "greet_customer" interaction, # then test the result. french_customer = FrenchCustomer(name='Henri') assert 'Bonjour Henri !!' == greet_customer(registry, french_customer)
[((59, 15, 59, 32), 'wired.ServiceRegistry', 'ServiceRegistry', ({}, {}), '()', False, 'from wired import ServiceRegistry\n')]
GeoSensorWebLab/FEAST_PtE
feast/DetectionModules/ldar_program.py
63ff8b7925873d756666f3c0c4b9f0f84abd5eb2
""" This module defines the LDARProgram class. """ import numpy as np import copy from .repair import Repair from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous class LDARProgram: """ An LDAR program contains one or more detection methods and one or more repair methods. Each LDAR program records the find and repair costs associated with all detection and repair methods in the program. The LDAR program deploys runs the action methods of each detection and repair method contained in the program. The detection and repair methods determine their own behavior at each time step. """ def __init__(self, gas_field, tech_dict): """ :param gas_field: a GasField object :param tech_dict: a dict containing all of the detection methods to be employed by the LDAR program. The dict must have the form {"name": DetectionMethod}. All of the relationships between detection methods and between detection methods and repair methods must be defined by the dispatch_objects specified for each method. """ self.emissions = copy.deepcopy(gas_field.emissions) self.emissions_timeseries = [] self.vents_timeseries = [] #self.emissions_results = ResultContinuous(units='g/s') #self.vents_results = ResultContinuous(units='g/s') self.tech_dict = tech_dict self.repair = {} self.repair_cost = ResultDiscrete(units='USD') for tech_name, tech in tech_dict.items(): if type(tech.dispatch_object) is Repair: self.repair[tech_name + ' ' + tech.dispatch_object.name] = tech.dispatch_object def action(self, time, gas_field): """ Runs the detect method for every tech in tech_dict and runs the repair method :param time: the simulation time object :param gas_field: the simulation gas_field object :return: """ for i, tech in enumerate(self.tech_dict.values()): if hasattr(tech, 'survey_interval') and tech.survey_interval \ and np.mod(time.current_time, tech.survey_interval) < time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int))) tech.detect(time, gas_field, self.emissions.get_current_emissions(time)) for rep in self.repair.values(): rep.repair(time, self.emissions) def calc_rep_costs(self, time): """ Calculates the total repair costs up to time.current_time, assuming that all reparable emissions that have a max end_time less than time.current_time have been repaired. :param time: a FEAST time object :return: None """ for em in self.emissions.emissions.index.unique(): empdf_temp = self.emissions.emissions.loc[[em]] max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0] if max_row.reparable & (max_row.end_time < time.current_time): self.repair_cost.append_entry([max_row.end_time, max_row.repair_cost])
[((25, 25, 25, 59), 'copy.deepcopy', 'copy.deepcopy', ({(25, 39, 25, 58): 'gas_field.emissions'}, {}), '(gas_field.emissions)', False, 'import copy\n'), ((46, 24, 46, 71), 'numpy.mod', 'np.mod', ({(46, 31, 46, 48): 'time.current_time', (46, 50, 46, 70): 'tech.survey_interval'}, {}), '(time.current_time, tech.survey_interval)', True, 'import numpy as np\n'), ((47, 33, 47, 100), 'numpy.linspace', 'np.linspace', (), '', True, 'import numpy as np\n')]
sjmoran/SIDGAN
src/CycleGAN.py
169bd69974bbb7f5760c28a00c231a856017e51c
#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. #This program is free software; you can redistribute it and/or modify it under the terms of the BSD 0-Clause License. #This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the BSD 0-Clause License for more details. from keras.optimizers import Adam from models.ICCV_architectures import * from models.unet import * from keras.engine.topology import Network import sys import tensorflow as tf from utilities.data_loader import * class CycleGAN(): def __init__(self, opt, image_shape=(256 * 1, 256 * 1, 3), load_training_data=True, normalization=InstanceNormalization, ): self.task = opt.task self.im_w = opt.im_w self.im_h = opt.im_h self.data_root = opt.data_root self.img_shape = image_shape self.channels = self.img_shape[-1] # Fetch data during training instead of pre caching all images self.use_data_generator = True self.generator_architecture = opt.generator_architecture self.use_norm = opt.use_norm self.add_extra_conv = opt.add_extra_conv self.image_shapeA = (opt.im_w * 1, opt.im_h * 1, 3) self.image_shapeA_in = (None, None, 3) if self.task == 'Long2Short_raw': self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 1) self.image_shapeB_in = (None, None, 3) else: self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 3) self.image_shapeB_in = (None, None, 3) # Identity loss - sometimes send images from B to G_A2B (and the opposite) to teach identity mappings self.use_identity_learning = opt.use_identity_learning self.identity_mapping_modulus = opt.identity_mapping_modulus # Identity mapping will be done each time the iteration number is divisable with this number # PatchGAN - if false the discriminator learning rate should be decreased self.use_patchgan = opt.use_patchgan self.normalization = normalization # Loss hyperparameters self.lambda_1 = opt.lambda_1 # Cyclic loss weight A_2_B self.lambda_2 = opt.lambda_2 # Cyclic loss weight B_2_A self.lambda_D = opt.lambda_D # Weight for loss from discriminator guess on synthetic images # Learning rates self.learning_rate_D = opt.lr_D self.learning_rate_G = opt.lr_G self.beta_1 = opt.beta_1 self.beta_2 = opt.beta_2 self.batch_size = 1 self.clipvalue = opt.clipvalue self.epsilon_norm = opt.epsilon_norm # self.crop_res = opt.crop_res # Resize convolution - instead of transpose convolution in deconvolution layers (uk) - can reduce checkerboard artifacts but the blurring might affect the cycle-consistency self.use_resize_convolution = opt.use_resize_convolution # Supervised learning part self.use_supervised_learning = opt.use_supervised_learning self.supervised_weight = opt.supervised_weight self.supervised_loss = opt.supervised_loss # optimizer if opt.clipvalue is not None: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue) else: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2) # # ======= Discriminator model ========== if self.generator_architecture == 'ICCV': D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images loss_weights_D = [0.5] # 
0.5 since we train on real and synthetic images elif self.generator_architecture == 'unet_mini': D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images # Discriminator builds image_A = Input(self.image_shapeA) image_B = Input(self.image_shapeB) guess_A = D_A(image_A) guess_B = D_B(image_B) self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model') self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model') if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) # Use Networks to avoid falsy keras error about weight descripancies self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model') self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model') # ============= Generator models ======================= # Do note update discriminator weights during generator training self.D_A_static.trainable = False self.D_B_static.trainable = False # Generators if self.generator_architecture == 'ICCV': self.G_A2B = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model') self.G_B2A = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model') elif self.generator_architecture == 'unet_mini': self.G_A2B = unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A = unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') # Generator builds real_A = Input(shape=self.image_shapeA, name='real_A') real_B = Input(shape=self.image_shapeB, name='real_B') synthetic_B = self.G_A2B(real_A) synthetic_A = self.G_B2A(real_B) dA_guess_synthetic = self.D_A_static(synthetic_A) dB_guess_synthetic = self.D_B_static(synthetic_B) reconstructed_A = self.G_B2A(synthetic_B) reconstructed_B = self.G_A2B(synthetic_A) model_outputs = [reconstructed_A, reconstructed_B] compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse] compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if self.supervised_loss == 'MAE': compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight) 
compile_weights.append(self.supervised_weight) self.G_model = Model(inputs=[real_A, real_B], outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights) # ======= Data ========== # Use 'None' to fetch all available images nr_A_test_imgs = 1000 nr_B_test_imgs = 1000 if self.use_data_generator: print('--- Using dataloader during training ---') else: print('--- Caching data ---') sys.stdout.flush() if load_training_data: if self.use_data_generator: self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w, generator=True) # Only store test images if opt.task == 'Vimeo2Long_SID': self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs, nr_B_test_imgs) else: self.A_test = [] self.B_test = [] self.A_train = [] self.B_train = [] if not self.use_data_generator: print('Data has been loaded') def load_model_and_weights(self, model, weights_path, iteration, by_name): name = model.name + '_weights_epoch_' + str(iteration) final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name) def print_info(self): print('fInitializing Cycle GAN with parameters ...') print('task: ', self.task) print('generator architecture: ', self.generator_architecture) print('image width: ', self.im_w) print('image height: ', self.im_h) print('learning date G: ', self.learning_rate_G) print('learning date D: ', self.learning_rate_D) print('use patchGAN: ', self.use_patchgan) print('use_identity_learning: ', self.use_identity_learning) print('normalization: ', self.normalization) print('identity_mapping_modulus: ', self.identity_mapping_modulus) print('lambda_1: ', self.lambda_1) print('lambda_2: ', self.lambda_2) print('lambda_D: ', self.lambda_D) print('beta_1: ', self.beta_1) print('beta_2: ', self.beta_2) print('use_supervised_learning: ', self.use_supervised_learning) print('supervised_weight: ', self.supervised_weight) print('supervised_loss: ', self.supervised_loss) def lse(self, y_true, y_pred): loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return loss def cycle_loss(self, y_true, y_pred): loss = tf.reduce_mean(tf.abs(y_pred - y_true)) return loss
[((135, 26, 135, 91), 'keras.engine.topology.Network', 'Network', (), '', False, 'from keras.engine.topology import Network\n'), ((136, 26, 136, 91), 'keras.engine.topology.Network', 'Network', (), '', False, 'from keras.engine.topology import Network\n'), ((219, 8, 219, 26), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((86, 25, 86, 103), 'keras.optimizers.Adam', 'Adam', (), '', False, 'from keras.optimizers import Adam\n'), ((87, 25, 87, 103), 'keras.optimizers.Adam', 'Adam', (), '', False, 'from keras.optimizers import Adam\n'), ((89, 25, 89, 77), 'keras.optimizers.Adam', 'Adam', ({(89, 30, 89, 50): 'self.learning_rate_D', (89, 52, 89, 63): 'self.beta_1', (89, 65, 89, 76): 'self.beta_2'}, {}), '(self.learning_rate_D, self.beta_1, self.beta_2)', False, 'from keras.optimizers import Adam\n'), ((90, 25, 90, 77), 'keras.optimizers.Adam', 'Adam', ({(90, 30, 90, 50): 'self.learning_rate_G', (90, 52, 90, 63): 'self.beta_1', (90, 65, 90, 76): 'self.beta_2'}, {}), '(self.learning_rate_G, self.beta_1, self.beta_2)', False, 'from keras.optimizers import Adam\n'), ((267, 30, 267, 67), 'tensorflow.squared_difference', 'tf.squared_difference', ({(267, 52, 267, 58): 'y_pred', (267, 60, 267, 66): 'y_true'}, {}), '(y_pred, y_true)', True, 'import tensorflow as tf\n'), ((271, 30, 271, 53), 'tensorflow.abs', 'tf.abs', ({(271, 37, 271, 52): 'y_pred - y_true'}, {}), '(y_pred - y_true)', True, 'import tensorflow as tf\n')]
edson-dev/neoway
application/fastapi/main.py
f792e16c0f627e8b94b54f001e87e076f36311ab
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from routes import doc, api
from fastapi.templating import Jinja2Templates
from starlette.requests import Request

# create the app; static files and Jinja2 templates are configured below
app = FastAPI(
    title="Technical Case",
    description="Endpoint for uploading spreadsheets into a relational Postgres database.",
    version="0.0.1",
    static_directory="static"
)
app.mount("/static", StaticFiles(directory="static"), name="static")

# import factory builders and initialize the documentation and API routes
doc.init_app(app)
api.init_app(app, "/api")

templates = Jinja2Templates(directory="templates")

# views
@app.get("/", tags=["/view"])
async def index(request: Request):
    return templates.TemplateResponse("index.html", {"request": request})


if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=8080)
[((10, 6, 15, 1), 'fastapi.FastAPI', 'FastAPI', (), '', False, 'from fastapi import FastAPI\n'), ((19, 0, 19, 17), 'routes.doc.init_app', 'doc.init_app', ({(19, 13, 19, 16): 'app'}, {}), '(app)', False, 'from routes import doc, api\n'), ((20, 0, 20, 25), 'routes.api.init_app', 'api.init_app', ({(20, 13, 20, 16): 'app', (20, 18, 20, 24): '"""/api"""'}, {}), "(app, '/api')", False, 'from routes import doc, api\n'), ((23, 12, 23, 50), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', (), '', False, 'from fastapi.templating import Jinja2Templates\n'), ((16, 21, 16, 52), 'fastapi.staticfiles.StaticFiles', 'StaticFiles', (), '', False, 'from fastapi.staticfiles import StaticFiles\n'), ((30, 4, 30, 54), 'uvicorn.run', 'uvicorn.run', (), '', False, 'import uvicorn\n')]
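main.py above delegates route registration to `doc.init_app(app)` and `api.init_app(app, "/api")`, but the `routes` package itself is not part of this row. The sketch below reproduces that `init_app` factory pattern with a stub `APIRouter` so it runs on its own; the router, path, and function names are illustrative assumptions, not the repository's actual implementation.

from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient

api_router = APIRouter()


@api_router.get("/health")
def health() -> dict:
    # Placeholder endpoint standing in for the repository's upload routes.
    return {"status": "ok"}


def init_app(app: FastAPI, prefix: str = "") -> None:
    # Same wiring style as api.init_app(app, "/api") in main.py above.
    app.include_router(api_router, prefix=prefix)


app = FastAPI(title="Technical Case")
init_app(app, "/api")

client = TestClient(app)
print(client.get("/api/health").json())  # {'status': 'ok'}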
jsfalk/civis-python
civis/io/_tables.py
39b6498b2d67d838d720d9631d74f3d3d43f7c1a
import json import concurrent.futures import csv from os import path import io import logging import os import shutil from tempfile import TemporaryDirectory import warnings import zlib import gzip import zipfile from civis import APIClient from civis._utils import maybe_get_random_name from civis.base import EmptyResultError, CivisImportError from civis.futures import CivisFuture from civis.io import civis_to_file, file_to_civis, query_civis from civis.utils import run_job from civis._deprecation import deprecate_param import requests try: from io import StringIO except ImportError: from cStringIO import StringIO try: import pandas as pd NO_PANDAS = False except ImportError: NO_PANDAS = True CHUNK_SIZE = 32 * 1024 log = logging.getLogger(__name__) __all__ = ['read_civis', 'read_civis_sql', 'civis_to_csv', 'civis_to_multifile_csv', 'dataframe_to_civis', 'csv_to_civis', 'civis_file_to_table', 'split_schema_tablename', 'export_to_civis_file'] DELIMITERS = { ',': 'comma', '\t': 'tab', '|': 'pipe', } @deprecate_param('v2.0.0', 'api_key') def read_civis(table, database, columns=None, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs): """Read data from a Civis table. Parameters ---------- table : str Name of table, including schema, in the database. E.g. ``'my_schema.my_table'``. Schemas or tablenames with periods must be double quoted, e.g. ``'my_schema."my.table"'``. database : str or int Read data from this database. Can be the database name or ID. columns : list, optional A list of column names. Column SQL transformations are possible. If omitted, all columns are exported. use_pandas : bool, optional If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise, return a list of results from :func:`python:csv.reader`. job_name : str, optional A name to give the job. If omitted, a random job name will be used. api_key : DEPRECATED str, optional Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY` environment variable will be used. client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The database credential ID. If ``None``, the default credential will be used. polling_interval : int or float, optional Number of seconds to wait between checks for query completion. archive : bool, optional (deprecated) If ``True``, archive the import job as soon as it completes. hidden : bool, optional If ``True`` (the default), this job will not appear in the Civis UI. **kwargs : kwargs Extra keyword arguments are passed into :func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or passed into :func:`python:csv.reader` if `use_pandas` is ``False``. Returns ------- data : :class:`pandas:pandas.DataFrame` or list A list of rows (with header as first row) if `use_pandas` is ``False``, otherwise a `pandas` `DataFrame`. Note that if `use_pandas` is ``False``, no parsing of types is performed and each row will be a list of strings. Raises ------ ImportError If `use_pandas` is ``True`` and `pandas` is not installed. 
Examples -------- >>> table = "schema.table" >>> database = "my_data" >>> columns = ["column_a", "ROW_NUMBER() OVER(ORDER BY date) AS order"] >>> data = read_civis(table, database, columns=columns) >>> columns = data.pop(0) >>> col_a_index = columns.index("column_a") >>> col_a = [row[col_a_index] for row in data] >>> df = read_civis("schema.table", "my_data", use_pandas=True) >>> col_a = df["column_a"] See Also -------- civis.io.read_civis_sql : Read directly into memory using SQL. civis.io.civis_to_csv : Write directly to csv. civis.io.export_to_civis_file : Store a SQL query's results in a Civis file """ if use_pandas and NO_PANDAS: raise ImportError("use_pandas is True but pandas is not installed.") if archive: warnings.warn("`archive` is deprecated and will be removed in v2.0.0. " "Use `hidden` instead.", FutureWarning) if client is None: # Instantiate client here in case users provide a (deprecated) api_key client = APIClient(api_key=api_key) sql = _get_sql_select(table, columns) data = read_civis_sql(sql=sql, database=database, use_pandas=use_pandas, job_name=job_name, client=client, credential_id=credential_id, polling_interval=polling_interval, archive=archive, hidden=hidden, **kwargs) return data def export_to_civis_file(sql, database, job_name=None, client=None, credential_id=None, polling_interval=None, hidden=True, csv_settings=None): """Store results of a query to a Civis file Parameters ---------- sql : str The SQL select string to be executed. database : str or int Execute the query against this database. Can be the database name or ID. job_name : str, optional A name to give the job. If omitted, a random job name will be used. client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The database credential ID. If ``None``, the default credential will be used. polling_interval : int or float, optional Number of seconds to wait between checks for query completion. hidden : bool, optional If ``True`` (the default), this job will not appear in the Civis UI. csv_settings : dict, optional A dictionary of csv_settings to pass to :func:`civis.APIClient.scripts.post_sql`. Returns ------- fut : :class:`~civis.futures.CivisFuture` A future which returns the response from :func:`civis.APIClient.scripts.get_sql_runs` after the sql query has completed and the result has been stored as a Civis file. Examples -------- >>> sql = "SELECT * FROM schema.table" >>> fut = export_to_civis_file(sql, "my_database") >>> file_id = fut.result()['output'][0]["file_id"] See Also -------- civis.io.read_civis : Read directly into memory without SQL. civis.io.read_civis_sql : Read results of a SQL query into memory. civis.io.civis_to_csv : Write directly to a CSV file. civis.io.civis_file_to_table : Upload a Civis file to a Civis table """ client = client or APIClient() script_id, run_id = _sql_script(client=client, sql=sql, database=database, job_name=job_name, credential_id=credential_id, csv_settings=csv_settings, hidden=hidden) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) return fut @deprecate_param('v2.0.0', 'api_key') def read_civis_sql(sql, database, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs): """Read data from Civis using a custom SQL string. 
The custom SQL string will be executed twice; once to attempt to retrieve headers and once to retrieve the data. This is done to use a more performant method for retrieving the data. The first execution of the custom SQL is controlled such that changes in state cannot occur (e.g., INSERT, UPDATE, DELETE, etc.). Parameters ---------- sql : str The SQL select string to be executed. database : str or int Execute the query against this database. Can be the database name or ID. use_pandas : bool, optional If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise, return a list of results from :func:`python:csv.reader`. job_name : str, optional A name to give the job. If omitted, a random job name will be used. api_key : DEPRECATED str, optional Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY` environment variable will be used. client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The database credential ID. If ``None``, the default credential will be used. polling_interval : int or float, optional Number of seconds to wait between checks for query completion. archive : bool, optional (deprecated) If ``True``, archive the import job as soon as it completes. hidden : bool, optional If ``True`` (the default), this job will not appear in the Civis UI. **kwargs : kwargs Extra keyword arguments are passed into :func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or passed into :func:`python:csv.reader` if `use_pandas` is ``False``. Returns ------- data : :class:`pandas:pandas.DataFrame` or list A list of rows (with header as first row) if `use_pandas` is ``False``, otherwise a `pandas` `DataFrame`. Note that if `use_pandas` is ``False``, no parsing of types is performed and each row will be a list of strings. Raises ------ ImportError If `use_pandas` is ``True`` and `pandas` is not installed. Examples -------- >>> sql = "SELECT * FROM schema.table" >>> df = read_civis_sql(sql, "my_database", use_pandas=True) >>> col_a = df["column_a"] >>> data = read_civis_sql(sql, "my_database") >>> columns = data.pop(0) >>> col_a_index = columns.index("column_a") >>> col_a = [row[col_a_index] for row in data] Notes ----- This reads the data into memory. See Also -------- civis.io.read_civis : Read directly into memory without SQL. civis.io.civis_to_csv : Write directly to a CSV file. """ if client is None: client = APIClient(api_key=api_key) if use_pandas and NO_PANDAS: raise ImportError("use_pandas is True but pandas is not installed.") if archive: warnings.warn("`archive` is deprecated and will be removed in v2.0.0. " "Use `hidden` instead.", FutureWarning) db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential # Try to get headers separately. In most scenarios this will greatly # reduce the work that Platform does to provide a single output file # with headers prepended to it due to how distributed databases export # data at scale. headers = _get_headers(client, sql, db_id, credential_id, polling_interval) # include_header defaults to True in the API. 
include_header = True if headers is None else False csv_settings = dict(include_header=include_header, compression='gzip') script_id, run_id = _sql_script(client, sql, db_id, job_name, credential_id, csv_settings=csv_settings, hidden=hidden) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) if archive: def f(x): return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) fut.result() outputs = client.scripts.get_sql_runs(script_id, run_id)["output"] if not outputs: raise EmptyResultError("Query {} returned no output." .format(script_id)) url = outputs[0]["path"] file_id = outputs[0]["file_id"] log.debug('Exported results to Civis file %s (%s)', outputs[0]["output_name"], file_id) if use_pandas: # allows users to enter their own names parameter _kwargs = {'names': headers} _kwargs.update(kwargs) _kwargs['compression'] = 'gzip' data = pd.read_csv(url, **_kwargs) else: response = requests.get(url, stream=True) response.raise_for_status() with StringIO() as buf: if headers: buf.write(','.join(headers) + '\n') _decompress_stream(response, buf, write_bytes=False) buf.seek(0) data = list(csv.reader(buf, **kwargs)) return data @deprecate_param('v2.0.0', 'api_key') def civis_to_csv(filename, sql, database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter=',', unquoted=False, archive=False, hidden=True, polling_interval=None): """Export data from Civis to a local CSV file. The custom SQL string will be executed twice; once to attempt to retrieve headers and once to retrieve the data. This is done to use a more performant method for retrieving the data. The first execution of the custom SQL is controlled such that changes in state cannot occur (e.g., INSERT, UPDATE, DELETE, etc.). Parameters ---------- filename : str Download exported data into this file. sql : str The SQL select string to be executed. database : str or int Export data from this database. Can be the database name or ID. job_name : str, optional A name to give the job. If omitted, a random job name will be used. api_key : DEPRECATED str, optional Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY` environment variable will be used. client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The ID of the database credential. If ``None``, the default credential will be used. include_header: bool, optional If ``True``, the first line of the CSV will be headers. Default: ``True``. compression: str, optional Type of compression to use, if any. One of ``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. ``'gzip'`` currently returns a file with no compression unless include_header is set to False. In a future release, a ``'gzip'`` compressed file will be returned for all cases. delimiter: str, optional Which delimiter to use, if any. One of ``','``, ``'\t'``, or ``'|'``. Default: ``','``. unquoted: bool, optional Whether or not to quote fields. Default: ``False``. polling_interval : int or float, optional Number of seconds to wait between checks for query completion. archive : bool, optional (deprecated) If ``True``, archive the import job as soon as it completes. hidden : bool, optional If ``True`` (the default), this job will not appear in the Civis UI. 
Returns ------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples -------- >>> sql = "SELECT * FROM schema.table" >>> fut = civis_to_csv("file.csv", sql, "my_database") >>> fut.result() # Wait for job to complete See Also -------- civis.io.read_civis : Read table contents into memory. civis.io.read_civis_sql : Read results of a SQL query into memory. civis.io.export_to_civis_file : Store a SQL query's results in a Civis file """ if archive: warnings.warn("`archive` is deprecated and will be removed in v2.0.0. " "Use `hidden` instead.", FutureWarning) if client is None: client = APIClient(api_key=api_key) db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential # don't fix bug that would cause breaking change for now # when gzip compression is requested, a gzip file is not actually returned # instead the gzip file is decompressed during download if compression == 'gzip' and include_header: compression = 'none' # don't support parallel unload; the output format # is different which would introduce a breaking change headers = b'' delimiter = DELIMITERS.get(delimiter) if not delimiter: raise ValueError("delimiter must be one of {}" .format(DELIMITERS.keys())) # always set compression to gzip to reduce I/O csv_settings = dict(include_header=include_header, compression='gzip', column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None, force_multifile=False) script_id, run_id = _sql_script(client, sql, db_id, job_name, credential_id, hidden=hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) download = _download_callback(script_id, run_id, filename, headers, compression) fut.add_done_callback(download) if archive: def f(x): return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) return fut @deprecate_param('v2.0.0', 'api_key') def civis_to_multifile_csv(sql, database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter='|', max_file_size=None, unquoted=False, prefix=None, polling_interval=None, hidden=True): """Unload the result of SQL query and return presigned urls. This function is intended for unloading large queries/tables from redshift as it uses a 'PARALLEL ON' S3 unload. It returns a similar manifest file to conventional S3 UNLOAD statements except the CSV parts are accessible via both files endpoint IDs and presigned S3 urls. Parameters ---------- sql : str The SQL select string to be executed. database : str or int Execute the query against this database. Can be the database name or ID. job_name : str, optional A name to give the job. If omitted, a random job name will be used. api_key : DEPRECATED str, optional Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY` environment variable will be used. client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The database credential ID. If ``None``, the default credential will be used. include_header: bool, optional If ``True`` include a key in the returned dictionary containing a list of column names. Default: ``True``. compression: str, optional Type of compression to use, if any. One of ``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. delimiter: str, optional Which delimiter to use, if any. 
One of ``','``, ``'\t'``, or ``'|'``. Default: ``'|'``. max_file_size: int, optional Maximum number of Megabytes each created file will be. unquoted: bool, optional Whether or not to quote fields. Default: ``False``. prefix: str, optional A user specified filename prefix for the output file to have. Default: ``None``. polling_interval : int or float, optional Number of seconds to wait between checks for query completion. hidden : bool, optional If ``True`` (the default), this job will not appear in the Civis UI. Returns ------- unload_manifest: dict A dictionary resembling an AWS manifest file. Has the following keys: 'query': str The query. 'header': list of str The columns from the query. 'entries': list of dict Each dict has the following keys: 'id': int File ID 'name': str Filename 'size': int File size in bytes 'url': str Unsigned S3 URL ('s3://...') 'url_signed': str Signed S3 URL ('https://...') 'unquoted': bool Whether the cells are quoted. 'compression': str Type of compression used. 'delimiter': str Delimiter that separates the cells. Examples -------- >>> sql = "SELECT * FROM schema.my_big_table" >>> database = "my_database" >>> delimiter = "|" >>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter) >>> ids = [entry['id'] for entry in manifest['entries']] >>> buf = BytesIO() >>> civis_to_file(ids[0], buf) >>> buf.seek(0) >>> df = pd.read_csv(buf, delimiter=delimiter) See Also -------- civis.APIClient.scripts.post_sql """ if client is None: client = APIClient(api_key=api_key) delimiter = DELIMITERS.get(delimiter) assert delimiter, "delimiter must be one of {}".format(DELIMITERS.keys()) csv_settings = dict(include_header=include_header, compression=compression, column_delimiter=delimiter, unquoted=unquoted, filename_prefix=prefix, force_multifile=True, max_file_size=max_file_size) script_id, run_id = _sql_script(client, sql, database, job_name, credential_id, hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) outputs = fut.result()["output"] if not outputs: raise EmptyResultError("Unload query {} returned no manifest." .format(script_id)) buf = io.BytesIO() civis_to_file(outputs[0]['file_id'], buf, client=client) txt = io.TextIOWrapper(buf, encoding='utf-8') txt.seek(0) unload_manifest = json.load(txt) return unload_manifest @deprecate_param('v2.0.0', 'api_key', 'headers') def dataframe_to_civis(df, database, table, api_key=None, client=None, max_errors=None, existing_table_rows="fail", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, headers=None, credential_id=None, primary_keys=None, last_modified_keys=None, execution="immediate", delimiter=None, polling_interval=None, archive=False, hidden=True, **kwargs): """Upload a `pandas` `DataFrame` into a Civis table. The `DataFrame`'s index will not be included. To store the index along with the other values, use `df.reset_index()` instead of `df` as the first argument to this function. Parameters ---------- df : :class:`pandas:pandas.DataFrame` The `DataFrame` to upload to Civis. database : str or int Upload data into this database. Can be the database name or ID. table : str The schema and table you want to upload to. E.g., ``'scratch.table'``. Schemas or tablenames with periods must be double quoted, e.g. ``'scratch."my.table"'``. api_key : DEPRECATED str, optional Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY` environment variable will be used. 
client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. max_errors : int, optional The maximum number of rows with errors to remove from the import before failing. existing_table_rows : str, optional The behaviour if a table with the requested name already exists. One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or ``'upsert'``. Defaults to ``'fail'``. diststyle : str, optional The distribution style for the table. One of ``'even'``, ``'all'`` or ``'key'``. distkey : str, optional The column to use as the distkey for the table. sortkey1 : str, optional The column to use as the sortkey for the table. sortkey2 : str, optional The second column in a compound sortkey for the table. table_columns : list[Dict[str, str]], optional A list of dictionaries corresponding to the columns in the source file. Each dictionary should have keys for column "name" and "sqlType". The import will only copy these columns regardless if there are more columns in the table. headers : bool, optional [DEPRECATED] Whether or not the first row of the file should be treated as headers. The default, ``None``, attempts to autodetect whether or not the first row contains headers. This parameter has no effect in versions >= 1.11 and will be removed in v2.0. Tables will always be written with column names read from the DataFrame. Use the `header` parameter (which will be passed directly to :func:`~pandas.DataFrame.to_csv`) to modify the column names in the Civis Table. credential_id : str or int, optional The ID of the database credential. If ``None``, the default credential will be used. primary_keys: list[str], optional A list of the primary key column(s) of the destination table that uniquely identify a record. If existing_table_rows is "upsert", this field is required. Note that this is true regardless of whether the destination database itself requires a primary key. last_modified_keys: list[str], optional A list of the columns indicating a record has been updated. If existing_table_rows is "upsert", this field is required. escaped: bool, optional A boolean value indicating whether or not the source file has quotes escaped with a backslash. Defaults to false. execution: string, optional, default "immediate" One of "delayed" or "immediate". If "immediate", refresh column statistics as part of the run. If "delayed", flag the table for a deferred statistics update; column statistics may not be available for up to 24 hours. In addition, if existing_table_rows is "upsert", delayed executions move data from staging table to final table after a brief delay, in order to accommodate multiple concurrent imports to the same destination table. polling_interval : int or float, optional Number of seconds to wait between checks for job completion. archive : bool, optional (deprecated) If ``True``, archive the import job as soon as it completes. hidden : bool, optional If ``True`` (the default), this job will not appear in the Civis UI. **kwargs : kwargs Extra keyword arguments will be passed to :meth:`pandas:pandas.DataFrame.to_csv`. Returns ------- fut : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples -------- >>> import pandas as pd >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}) >>> fut = civis.io.dataframe_to_civis(df, 'my-database', ... 
'scratch.df_table') >>> fut.result() See Also -------- :func:`~pandas.DataFrame.to_csv` """ if client is None: client = APIClient(api_key=api_key) if archive: warnings.warn("`archive` is deprecated and will be removed in v2.0.0. " "Use `hidden` instead.", FutureWarning) headers = False if kwargs.get('header') is False else True with TemporaryDirectory() as tmp_dir: tmp_path = os.path.join(tmp_dir, 'dataframe_to_civis.csv') to_csv_kwargs = {'encoding': 'utf-8', 'index': False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path, **to_csv_kwargs) _, name = split_schema_tablename(table) file_id = file_to_civis(tmp_path, name, client=client) delimiter = ',' fut = civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=False, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'api_key') def csv_to_civis(filename, database, table, api_key=None, client=None, max_errors=None, existing_table_rows="fail", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, delimiter=",", headers=None, primary_keys=None, last_modified_keys=None, escaped=False, execution="immediate", credential_id=None, polling_interval=None, archive=False, hidden=True): """Upload the contents of a local CSV file to Civis. Parameters ---------- filename : str Upload the contents of this file. database : str or int Upload data into this database. Can be the database name or ID. table : str The schema and table you want to upload to. E.g., ``'scratch.table'``. api_key : DEPRECATED str, optional Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY` environment variable will be used. client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. max_errors : int, optional The maximum number of rows with errors to remove from the import before failing. existing_table_rows : str, optional The behaviour if a table with the requested name already exists. One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or ``'upsert'``. Defaults to ``'fail'``. diststyle : str, optional The distribution style for the table. One of ``'even'``, ``'all'`` or ``'key'``. distkey : str, optional The column to use as the distkey for the table. sortkey1 : str, optional The column to use as the sortkey for the table. sortkey2 : str, optional The second column in a compound sortkey for the table. table_columns : list[Dict[str, str]], optional A list of dictionaries corresponding to the columns in the source file. Each dictionary should have keys for column "name" and "sqlType". The import will only copy these columns regardless if there are more columns in the table. delimiter : string, optional The column delimiter. One of ``','``, ``'\\t'`` or ``'|'``. headers : bool, optional Whether or not the first row of the file should be treated as headers. The default, ``None``, attempts to autodetect whether or not the first row contains headers. primary_keys: list[str], optional A list of the primary key column(s) of the destination table that uniquely identify a record. If existing_table_rows is "upsert", this field is required. 
Note that this is true regardless of whether the destination database itself requires a primary key. last_modified_keys: list[str], optional A list of the columns indicating a record has been updated. If existing_table_rows is "upsert", this field is required. escaped: bool, optional A boolean value indicating whether or not the source file has quotes escaped with a backslash. Defaults to false. execution: string, optional, default "immediate" One of "delayed" or "immediate". If "immediate", refresh column statistics as part of the run. If "delayed", flag the table for a deferred statistics update; column statistics may not be available for up to 24 hours. In addition, if existing_table_rows is "upsert", delayed executions move data from staging table to final table after a brief delay, in order to accommodate multiple concurrent imports to the same destination table. credential_id : str or int, optional The ID of the database credential. If ``None``, the default credential will be used. polling_interval : int or float, optional Number of seconds to wait between checks for job completion. archive : bool, optional (deprecated) If ``True``, archive the import job as soon as it completes. hidden : bool, optional If ``True`` (the default), this job will not appear in the Civis UI. Returns ------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Notes ----- This reads the contents of `filename` into memory. Examples -------- >>> with open('input_file.csv', 'w') as _input: ... _input.write('a,b,c\\n1,2,3') >>> fut = civis.io.csv_to_civis('input_file.csv', ... 'my-database', ... 'scratch.my_data') >>> fut.result() """ if client is None: client = APIClient(api_key=api_key) if archive: warnings.warn("`archive` is deprecated and will be removed in v2.0.0. " "Use `hidden` instead.", FutureWarning) name = path.basename(filename) with open(filename, "rb") as data: file_id = file_to_civis(data, name, client=client) log.debug('Uploaded file %s to Civis file %s', filename, file_id) fut = civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=escaped, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'file_id') def civis_file_to_table(file_id, database, table, client=None, max_errors=None, existing_table_rows="fail", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, primary_keys=None, last_modified_keys=None, escaped=False, execution="immediate", delimiter=None, headers=None, credential_id=None, polling_interval=None, hidden=True): """Upload the contents of one or more Civis files to a Civis table. All provided files will be loaded as an atomic unit in parallel, and should share the same columns in the same order, and be in the same format. Parameters ---------- file_id : int or list[int] Civis file ID or a list of Civis file IDs. Reference by name to this argument is deprecated, as the name will change in v2.0.0. database : str or int Upload data into this database. Can be the database name or ID. table : str The schema and table you want to upload to. E.g., ``'scratch.table'``. 
client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. max_errors : int, optional The maximum number of rows with errors to remove from the import before failing. If multiple files are provided, this limit applies across all files combined. existing_table_rows : str, optional The behaviour if a table with the requested name already exists. One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or ``'upsert'``. Defaults to ``'fail'``. diststyle : str, optional The distribution style for the table. One of ``'even'``, ``'all'`` or ``'key'``. distkey : str, optional The column to use as the distkey for the table. sortkey1 : str, optional The column to use as the sortkey for the table. sortkey2 : str, optional The second column in a compound sortkey for the table. table_columns : list[Dict[str, str]], optional A list of dictionaries corresponding to the columns in the source file. Each dictionary should have keys for column "name" and "sqlType". The import will only copy these columns regardless if there are more columns in the table. primary_keys: list[str], optional A list of the primary key column(s) of the destination table that uniquely identify a record. If existing_table_rows is "upsert", this field is required. Note that this is true regardless of whether the destination database itself requires a primary key. last_modified_keys: list[str], optional A list of the columns indicating a record has been updated. If existing_table_rows is "upsert", this field is required. escaped: bool, optional A boolean value indicating whether or not the source file(s) escape quotes with a backslash. Defaults to false. execution: string, optional, default "immediate" One of "delayed" or "immediate". If "immediate", refresh column statistics as part of the run. If "delayed", flag the table for a deferred statistics update; column statistics may not be available for up to 24 hours. In addition, if existing_table_rows is "upsert", delayed executions move data from staging table to final table after a brief delay, in order to accommodate multiple concurrent imports to the same destination table. delimiter : string, optional The column delimiter. One of ``','``, ``'\\t'`` or ``'|'``. If not provided, will attempt to auto-detect. headers : bool, optional Whether or not the first row of the file should be treated as headers. The default, ``None``, attempts to autodetect whether or not the first row contains headers. credential_id : str or int, optional The ID of the database credential. If ``None``, the default credential will be used. polling_interval : int or float, optional Number of seconds to wait between checks for job completion. hidden : bool, optional If ``True`` (the default), this job will not appear in the Civis UI. Returns ------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Raises ------ CivisImportError If multiple files are given and determined to be incompatible for import. This may be the case if their columns have different types, their delimiters are different, headers are present in some but not others, or compressions do not match. Examples -------- >>> file_id = 100 >>> fut = civis.io.civis_file_to_table(file_id, ... 'my-database', ... 
'scratch.my_data') >>> fut.result() """ if client is None: client = APIClient() schema, table = split_schema_tablename(table) if isinstance(file_id, int): file_id = [file_id] if schema is None: raise ValueError("Provide a schema as part of the `table` input.") db_id = client.get_database_id(database) cred_id = credential_id or client.default_credential if delimiter is not None: # i.e. it was provided as an argument delimiter = DELIMITERS.get(delimiter) assert delimiter, "delimiter must be one of {}".format( DELIMITERS.keys() ) try: client.get_table_id(table, database) log.debug('Table {table} already exists - skipping column ' 'detection'.format(table=table)) table_exists = True except ValueError: table_exists = False # Use Preprocess endpoint to get the table columns as needed # and perform necessary file cleaning need_table_columns = ((not table_exists or existing_table_rows == 'drop') and table_columns is None) cleaning_futures = _run_cleaning(file_id, client, need_table_columns, headers, delimiter, hidden) (cleaned_file_ids, headers, compression, delimiter, cleaned_table_columns) = _process_cleaning_results( cleaning_futures, client, headers, need_table_columns, delimiter ) table_columns = table_columns or cleaned_table_columns source = dict(file_ids=cleaned_file_ids) destination = dict(schema=schema, table=table, remote_host_id=db_id, credential_id=cred_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys) redshift_options = dict(distkey=distkey, sortkeys=[sortkey1, sortkey2], diststyle=diststyle) # If multiple files are being imported, there might be differences in # their precisions/lengths - setting this option will allow the Civis API # to increase these values for the data types provided, and decreases the # risk of a length-related import failure loosen_types = len(file_id) > 1 import_name = 'CSV import to {}.{}'.format(schema, table) import_job = client.imports.post_files_csv( source, destination, headers, name=import_name, max_errors=max_errors, existing_table_rows=existing_table_rows, column_delimiter=delimiter, compression=compression, escaped=escaped, execution=execution, loosen_types=loosen_types, table_columns=table_columns, redshift_destination_options=redshift_options, hidden=hidden ) fut = run_job(import_job.id, client=client, polling_interval=polling_interval) log.debug('Started run %d for import %d', fut.run_id, import_job.id) return fut def _sql_script(client, sql, database, job_name, credential_id, hidden=False, csv_settings=None): job_name = maybe_get_random_name(job_name) db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential csv_settings = csv_settings or {} export_job = client.scripts.post_sql(job_name, remote_host_id=db_id, credential_id=credential_id, sql=sql, hidden=hidden, csv_settings=csv_settings) run_job = client.scripts.post_sql_runs(export_job.id) log.debug('Started run %d of SQL script %d', run_job.id, export_job.id) return export_job.id, run_job.id def _get_sql_select(table, columns=None): if columns and not isinstance(columns, (list, tuple)): raise TypeError("columns must be a list, tuple or None") select = ", ".join(columns) if columns is not None else "*" sql = "select {} from {}".format(select, table) return sql def _get_headers(client, sql, database, credential_id, polling_interval=None): headers = None try: # use 'begin read only;' to ensure we can't change state sql = 'begin read only; select * from ({}) limit 1'.format(sql) fut = query_civis(sql, database, client=client, 
credential_id=credential_id, polling_interval=polling_interval) headers = fut.result()['result_columns'] except Exception as exc: # NOQA log.debug("Failed to retrieve headers due to %s", str(exc)) return headers def _decompress_stream(response, buf, write_bytes=True): # use response.raw for a more consistent approach # if content-encoding is specified in the headers # then response.iter_content will decompress the stream # however, our use of content-encoding is inconsistent chunk = response.raw.read(CHUNK_SIZE) d = zlib.decompressobj(zlib.MAX_WBITS | 32) while chunk or d.unused_data: if d.unused_data: to_decompress = d.unused_data + chunk d = zlib.decompressobj(zlib.MAX_WBITS | 32) else: to_decompress = d.unconsumed_tail + chunk if write_bytes: buf.write(d.decompress(to_decompress)) else: buf.write(d.decompress(to_decompress).decode('utf-8')) chunk = response.raw.read(CHUNK_SIZE) def _download_file(url, local_path, headers, compression): response = requests.get(url, stream=True) response.raise_for_status() # gzipped buffers can be concatenated so write headers as gzip if compression == 'gzip': with gzip.open(local_path, 'wb') as fout: fout.write(headers) with open(local_path, 'ab') as fout: shutil.copyfileobj(response.raw, fout, CHUNK_SIZE) # write headers and decompress the stream elif compression == 'none': with open(local_path, 'wb') as fout: fout.write(headers) _decompress_stream(response, fout) # decompress the stream, write headers, and zip the file elif compression == 'zip': with TemporaryDirectory() as tmp_dir: tmp_path = path.join(tmp_dir, 'civis_to_csv.csv') with open(tmp_path, 'wb') as tmp_file: tmp_file.write(headers) _decompress_stream(response, tmp_file) with zipfile.ZipFile(local_path, 'w') as fout: arcname = path.basename(local_path) if arcname.split('.')[-1] == 'zip': arcname = arcname.split('.')[0] + '.csv' fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED) def _download_callback(job_id, run_id, filename, headers, compression): def callback(future): if not future.succeeded(): return outputs = future.result().get("output") if not outputs: warnings.warn("Job %s, run %s does not have any output to " "download. Not creating file %s." % (job_id, run_id, filename), RuntimeWarning) return else: url = outputs[0]["path"] file_id = outputs[0]["file_id"] log.debug('Exported results to Civis file %s', file_id) return _download_file(url, filename, headers, compression) return callback def split_schema_tablename(table): """Split a Redshift 'schema.tablename' string Remember that special characters (such as '.') can only be included in a schema or table name if delimited by double-quotes. Parameters ---------- table: str Either a Redshift schema and table name combined with a ".", or else a single table name. Returns ------- schema, tablename A 2-tuple of strings. The ``schema`` may be None if the input is only a table name, but the ``tablename`` will always be filled. Raises ------ ValueError If the input ``table`` is not separable into a schema and table name. """ reader = csv.reader(StringIO(str(table)), delimiter=".", doublequote=True, quotechar='"') schema_name_tup = next(reader) if len(schema_name_tup) == 1: schema_name_tup = (None, schema_name_tup[0]) if len(schema_name_tup) != 2: raise ValueError("Cannot parse schema and table. " "Does '{}' follow the pattern 'schema.table'?" .format(table)) return tuple(schema_name_tup) def _replace_null_column_names(column_list): """Replace null names in columns from file cleaning with an appropriately blank column name. 
Parameters ---------- column_list: list[dict] the list of columns from file cleaning. Returns -------- column_list: list[dict] """ new_cols = [] for i, col in enumerate(column_list): # Avoid mutating input arguments new_col = dict(col) if new_col.get('name') is None: new_col['name'] = 'column_{}'.format(i) new_cols.append(new_col) return new_cols def _run_cleaning(file_ids, client, need_table_columns, headers, delimiter, hidden, polling_interval=None): cleaning_futures = [] for fid in file_ids: cleaner_job = client.files.post_preprocess_csv( file_id=fid, in_place=False, detect_table_columns=need_table_columns, force_character_set_conversion=True, include_header=headers, column_delimiter=delimiter, hidden=hidden ) cleaning_futures.append(run_job(cleaner_job.id, client=client, polling_interval=polling_interval)) return cleaning_futures def _check_all_detected_info(detected_info, headers, delimiter, compression, output_file_id): """Check a single round of cleaning results as compared to provided values. Parameters ---------- detected_info: Dict[str, Any] The detected info of the file as returned by the Civis API. headers: bool The provided value for whether or not the file contains errors. delimiter: str The provided value for the file delimiter. compression: str The provided value for the file compression. output_file_id: int The cleaned file's Civis ID. Used for debugging. Raises ------ CivisImportError If the values detected on the file do not match their expected attributes. """ if headers != detected_info['includeHeader']: raise CivisImportError('Mismatch between detected headers - ' 'please ensure all imported files either ' 'have a header or do not.') if delimiter != detected_info['columnDelimiter']: raise CivisImportError('Provided delimiter "{}" does not match ' 'detected delimiter for {}: "{}"'.format( delimiter, output_file_id, detected_info["columnDelimiter"]) ) if compression != detected_info['compression']: raise CivisImportError('Mismatch between detected and provided ' 'compressions - provided compression was {}' ' but detected compression {}. Please ' 'ensure all imported files have the same ' 'compression.'.format( compression, detected_info['compression']) ) def _process_cleaning_results(cleaning_futures, client, headers, need_table_columns, delimiter): cleaned_file_ids = [] done, still_going = concurrent.futures.wait( cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED ) # Set values from first completed file cleaning - other files will be # compared to this one. If inconsistencies are detected, raise an error. first_completed = done.pop() output_file = client.jobs.list_runs_outputs( first_completed.job_id, first_completed.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info table_columns = (detected_info['tableColumns'] if need_table_columns else None) if headers is None: headers = detected_info['includeHeader'] if delimiter is None: delimiter = detected_info['columnDelimiter'] compression = detected_info['compression'] _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) # Ensure that all results from files are correctly accounted for - # Since concurrent.futures.wait returns two sets, it is possible # That done contains more than one Future. Thus it is necessary to account # for these possible completed cleaning runs while waiting on those which # are still running. 
for result in concurrent.futures.as_completed(done | still_going): output_file = client.jobs.list_runs_outputs( result.job_id, result.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info if need_table_columns: file_columns = detected_info['tableColumns'] _check_column_types(table_columns, file_columns, output_file.object_id) _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) if need_table_columns: table_columns = _replace_null_column_names(table_columns) return cleaned_file_ids, headers, compression, delimiter, table_columns def _check_column_types(table_columns, file_columns, output_obj_id): """Check that base column types match those current defined for the table. Parameters ---------- table_columns: List[Dict[str, str]] The columns for the table to be created. file_columns: List[Dict[str, str]] The columns detected by the Civis API for the file. output_obj_id: int The file ID under consideration; used for error messaging. Raises ------ CivisImportError If the table columns and the file columns have a type mismatch, or differ in count. """ if len(table_columns) != len(file_columns): raise CivisImportError('All files should have the same number of ' 'columns. Expected {} columns but file {} ' 'has {} columns'.format( len(table_columns), output_obj_id, len(file_columns)) ) error_msgs = [] for idx, (tcol, fcol) in enumerate(zip(table_columns, file_columns)): # for the purposes of type checking, we care only that the types # share a base type (e.g. INT, VARCHAR, DECIMAl) rather than that # they have the same precision and length # (e.g VARCHAR(42), DECIMAL(8, 10)) tcol_base_type = tcol['sql_type'].split('(', 1)[0] fcol_base_type = fcol['sql_type'].split('(', 1)[0] if tcol_base_type != fcol_base_type: error_msgs.append( 'Column {}: File base type was {}, but expected {}'.format( idx, fcol_base_type, tcol_base_type ) ) if error_msgs: raise CivisImportError( 'Encountered the following errors for file {}:\n\t{}'.format( output_obj_id, '\n\t'.join(error_msgs) ) )
[((37, 6, 37, 33), 'logging.getLogger', 'logging.getLogger', ({(37, 24, 37, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((50, 1, 50, 37), 'civis._deprecation.deprecate_param', 'deprecate_param', ({(50, 17, 50, 25): '"""v2.0.0"""', (50, 27, 50, 36): '"""api_key"""'}, {}), "('v2.0.0', 'api_key')", False, 'from civis._deprecation import deprecate_param\n'), ((207, 1, 207, 37), 'civis._deprecation.deprecate_param', 'deprecate_param', ({(207, 17, 207, 25): '"""v2.0.0"""', (207, 27, 207, 36): '"""api_key"""'}, {}), "('v2.0.0', 'api_key')", False, 'from civis._deprecation import deprecate_param\n'), ((354, 1, 354, 37), 'civis._deprecation.deprecate_param', 'deprecate_param', ({(354, 17, 354, 25): '"""v2.0.0"""', (354, 27, 354, 36): '"""api_key"""'}, {}), "('v2.0.0', 'api_key')", False, 'from civis._deprecation import deprecate_param\n'), ((476, 1, 476, 37), 'civis._deprecation.deprecate_param', 'deprecate_param', ({(476, 17, 476, 25): '"""v2.0.0"""', (476, 27, 476, 36): '"""api_key"""'}, {}), "('v2.0.0', 'api_key')", False, 'from civis._deprecation import deprecate_param\n'), ((615, 1, 615, 48), 'civis._deprecation.deprecate_param', 'deprecate_param', ({(615, 17, 615, 25): '"""v2.0.0"""', (615, 27, 615, 36): '"""api_key"""', (615, 38, 615, 47): '"""headers"""'}, {}), "('v2.0.0', 'api_key', 'headers')", False, 'from civis._deprecation import deprecate_param\n'), ((761, 1, 761, 37), 'civis._deprecation.deprecate_param', 'deprecate_param', ({(761, 17, 761, 25): '"""v2.0.0"""', (761, 27, 761, 36): '"""api_key"""'}, {}), "('v2.0.0', 'api_key')", False, 'from civis._deprecation import deprecate_param\n'), ((890, 1, 890, 37), 'civis._deprecation.deprecate_param', 'deprecate_param', ({(890, 17, 890, 25): '"""v2.0.0"""', (890, 27, 890, 36): '"""file_id"""'}, {}), "('v2.0.0', 'file_id')", False, 'from civis._deprecation import deprecate_param\n'), ((201, 10, 203, 45), 'civis.futures.CivisFuture', 'CivisFuture', (), '', False, 'from civis.futures import CivisFuture\n'), ((313, 10, 315, 45), 'civis.futures.CivisFuture', 'CivisFuture', (), '', False, 'from civis.futures import CivisFuture\n'), ((460, 10, 462, 45), 'civis.futures.CivisFuture', 'CivisFuture', (), '', False, 'from civis.futures import CivisFuture\n'), ((597, 10, 599, 45), 'civis.futures.CivisFuture', 'CivisFuture', (), '', False, 'from civis.futures import CivisFuture\n'), ((606, 10, 606, 22), 'io.BytesIO', 'io.BytesIO', ({}, {}), '()', False, 'import io\n'), ((607, 4, 607, 60), 'civis.io.civis_to_file', 'civis_to_file', (), '', False, 'from civis.io import civis_to_file, file_to_civis, query_civis\n'), ((608, 10, 608, 49), 'io.TextIOWrapper', 'io.TextIOWrapper', (), '', False, 'import io\n'), ((610, 22, 610, 36), 'json.load', 'json.load', ({(610, 32, 610, 35): 'txt'}, {}), '(txt)', False, 'import json\n'), ((870, 11, 870, 34), 'os.path.basename', 'path.basename', ({(870, 25, 870, 33): 'filename'}, {}), '(filename)', False, 'from os import path\n'), ((1074, 15, 1074, 46), 'civis._utils.maybe_get_random_name', 'maybe_get_random_name', ({(1074, 37, 1074, 45): 'job_name'}, {}), '(job_name)', False, 'from civis._utils import maybe_get_random_name\n'), ((1122, 8, 1122, 47), 'zlib.decompressobj', 'zlib.decompressobj', ({(1122, 27, 1122, 46): 'zlib.MAX_WBITS | 32'}, {}), '(zlib.MAX_WBITS | 32)', False, 'import zlib\n'), ((1138, 15, 1138, 45), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((129, 8, 130, 61), 'warnings.warn', 'warnings.warn', ({(129, 22, 130, 45): '"""`archive` is deprecated and will be removed 
in v2.0.0. Use `hidden` instead."""', (130, 47, 130, 60): 'FutureWarning'}, {}), "(\n '`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead.'\n , FutureWarning)", False, 'import warnings\n'), ((133, 17, 133, 43), 'civis.APIClient', 'APIClient', (), '', False, 'from civis import APIClient\n'), ((193, 23, 193, 34), 'civis.APIClient', 'APIClient', ({}, {}), '()', False, 'from civis import APIClient\n'), ((288, 17, 288, 43), 'civis.APIClient', 'APIClient', (), '', False, 'from civis import APIClient\n'), ((292, 8, 293, 61), 'warnings.warn', 'warnings.warn', ({(292, 22, 293, 45): '"""`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead."""', (293, 47, 293, 60): 'FutureWarning'}, {}), "(\n '`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead.'\n , FutureWarning)", False, 'import warnings\n'), ((339, 15, 339, 42), 'pandas.read_csv', 'pd.read_csv', ({(339, 27, 339, 30): 'url'}, {}), '(url, **_kwargs)', True, 'import pandas as pd\n'), ((341, 19, 341, 49), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((426, 8, 427, 61), 'warnings.warn', 'warnings.warn', ({(426, 22, 427, 45): '"""`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead."""', (427, 47, 427, 60): 'FutureWarning'}, {}), "(\n '`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead.'\n , FutureWarning)", False, 'import warnings\n'), ((429, 17, 429, 43), 'civis.APIClient', 'APIClient', (), '', False, 'from civis import APIClient\n'), ((582, 17, 582, 43), 'civis.APIClient', 'APIClient', (), '', False, 'from civis import APIClient\n'), ((729, 17, 729, 43), 'civis.APIClient', 'APIClient', (), '', False, 'from civis import APIClient\n'), ((731, 8, 732, 61), 'warnings.warn', 'warnings.warn', ({(731, 22, 732, 45): '"""`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead."""', (732, 47, 732, 60): 'FutureWarning'}, {}), "(\n '`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead.'\n , FutureWarning)", False, 'import warnings\n'), ((735, 9, 735, 29), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ({}, {}), '()', False, 'from tempfile import TemporaryDirectory\n'), ((736, 19, 736, 66), 'os.path.join', 'os.path.join', ({(736, 32, 736, 39): 'tmp_dir', (736, 41, 736, 65): '"""dataframe_to_civis.csv"""'}, {}), "(tmp_dir, 'dataframe_to_civis.csv')", False, 'import os\n'), ((741, 18, 741, 62), 'civis.io.file_to_civis', 'file_to_civis', (), '', False, 'from civis.io import civis_to_file, file_to_civis, query_civis\n'), ((865, 17, 865, 43), 'civis.APIClient', 'APIClient', (), '', False, 'from civis import APIClient\n'), ((867, 8, 868, 61), 'warnings.warn', 'warnings.warn', ({(867, 22, 868, 45): '"""`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead."""', (868, 47, 868, 60): 'FutureWarning'}, {}), "(\n '`archive` is deprecated and will be removed in v2.0.0. 
Use `hidden` instead.'\n , FutureWarning)", False, 'import warnings\n'), ((872, 18, 872, 58), 'civis.io.file_to_civis', 'file_to_civis', (), '', False, 'from civis.io import civis_to_file, file_to_civis, query_civis\n'), ((997, 17, 997, 28), 'civis.APIClient', 'APIClient', ({}, {}), '()', False, 'from civis import APIClient\n'), ((1105, 14, 1107, 60), 'civis.io.query_civis', 'query_civis', (), '', False, 'from civis.io import civis_to_file, file_to_civis, query_civis\n'), ((1294, 14, 1296, 58), 'civis.base.CivisImportError', 'CivisImportError', ({(1294, 31, 1296, 57): '"""Mismatch between detected headers - please ensure all imported files either have a header or do not."""'}, {}), "(\n 'Mismatch between detected headers - please ensure all imported files either have a header or do not.'\n )", False, 'from civis.base import EmptyResultError, CivisImportError\n'), ((344, 13, 344, 23), 'cStringIO.StringIO', 'StringIO', ({}, {}), '()', False, 'from cStringIO import StringIO\n'), ((1127, 16, 1127, 55), 'zlib.decompressobj', 'zlib.decompressobj', ({(1127, 35, 1127, 54): 'zlib.MAX_WBITS | 32'}, {}), '(zlib.MAX_WBITS | 32)', False, 'import zlib\n'), ((1143, 13, 1143, 40), 'gzip.open', 'gzip.open', ({(1143, 23, 1143, 33): 'local_path', (1143, 35, 1143, 39): '"""wb"""'}, {}), "(local_path, 'wb')", False, 'import gzip\n'), ((1146, 12, 1146, 62), 'shutil.copyfileobj', 'shutil.copyfileobj', ({(1146, 31, 1146, 43): 'response.raw', (1146, 45, 1146, 49): 'fout', (1146, 51, 1146, 61): 'CHUNK_SIZE'}, {}), '(response.raw, fout, CHUNK_SIZE)', False, 'import shutil\n'), ((1176, 12, 1179, 41), 'warnings.warn', 'warnings.warn', ({(1176, 26, 1178, 54): "('Job %s, run %s does not have any output to download. Not creating file %s.' %\n (job_id, run_id, filename))", (1179, 26, 1179, 40): 'RuntimeWarning'}, {}), "(\n 'Job %s, run %s does not have any output to download. Not creating file %s.'\n % (job_id, run_id, filename), RuntimeWarning)", False, 'import warnings\n'), ((349, 24, 349, 49), 'csv.reader', 'csv.reader', ({(349, 35, 349, 38): 'buf'}, {}), '(buf, **kwargs)', False, 'import csv\n'), ((1156, 13, 1156, 33), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ({}, {}), '()', False, 'from tempfile import TemporaryDirectory\n'), ((1157, 23, 1157, 61), 'os.path.join', 'path.join', ({(1157, 33, 1157, 40): 'tmp_dir', (1157, 42, 1157, 60): '"""civis_to_csv.csv"""'}, {}), "(tmp_dir, 'civis_to_csv.csv')", False, 'from os import path\n'), ((1162, 17, 1162, 49), 'zipfile.ZipFile', 'zipfile.ZipFile', ({(1162, 33, 1162, 43): 'local_path', (1162, 45, 1162, 48): '"""w"""'}, {}), "(local_path, 'w')", False, 'import zipfile\n'), ((1163, 26, 1163, 51), 'os.path.basename', 'path.basename', ({(1163, 40, 1163, 50): 'local_path'}, {}), '(local_path)', False, 'from os import path\n')]
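Most of `_tables.py` above is plumbing around Civis SQL imports and exports, but one small, self-contained piece is `split_schema_tablename`, which parses `schema.table` strings with `csv.reader` so that double-quoted identifiers containing periods survive the split. The stdlib-only sketch below mirrors that parsing idea and runs without the `civis` package installed; it is a re-illustration, not the library function itself.

import csv
from io import StringIO


def split_schema_tablename(table):
    """Split 'schema.tablename', keeping double-quoted names with dots intact."""
    reader = csv.reader(StringIO(table), delimiter=".",
                        doublequote=True, quotechar='"')
    parts = next(reader)
    if len(parts) == 1:
        return None, parts[0]
    if len(parts) != 2:
        raise ValueError("Cannot parse schema and table from {!r}".format(table))
    return tuple(parts)


print(split_schema_tablename('my_schema."my.table"'))  # ('my_schema', 'my.table')
print(split_schema_tablename('just_a_table'))          # (None, 'just_a_table')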
chschroeder/small-text
tests/unit/small_text/integrations/pytorch/test_strategies.py
ef28e91ba0c94fe938dde4f16253aa8695ea13b7
import unittest import pytest from small_text.integrations.pytorch.exceptions import PytorchNotFoundError try: from small_text.integrations.pytorch.query_strategies import ( BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord) except PytorchNotFoundError: pass @pytest.mark.pytorch class BADGETest(unittest.TestCase): def test_init_default(self): strategy = BADGE(2) self.assertEqual(2, strategy.num_classes) def test_init(self): strategy = BADGE(4) self.assertEqual(4, strategy.num_classes) def test_badge_str(self): strategy = BADGE(2) expected_str = 'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) def test_init(self): strategy = ExpectedGradientLength(4, batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) def test_expected_gradient_length_str(self): strategy = ExpectedGradientLength(2) expected_str = 'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) self.assertEqual('embedding', strategy.layer_name) def test_init(self): strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) self.assertEqual('embedding', strategy.layer_name)
[((20, 19, 20, 27), 'small_text.integrations.pytorch.query_strategies.BADGE', 'BADGE', ({(20, 25, 20, 26): '2'}, {}), '(2)', False, 'from small_text.integrations.pytorch.query_strategies import BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord\n'), ((24, 19, 24, 27), 'small_text.integrations.pytorch.query_strategies.BADGE', 'BADGE', ({(24, 25, 24, 26): '4'}, {}), '(4)', False, 'from small_text.integrations.pytorch.query_strategies import BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord\n'), ((28, 19, 28, 27), 'small_text.integrations.pytorch.query_strategies.BADGE', 'BADGE', ({(28, 25, 28, 26): '2'}, {}), '(2)', False, 'from small_text.integrations.pytorch.query_strategies import BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord\n'), ((37, 19, 37, 44), 'small_text.integrations.pytorch.query_strategies.ExpectedGradientLength', 'ExpectedGradientLength', ({(37, 42, 37, 43): '2'}, {}), '(2)', False, 'from small_text.integrations.pytorch.query_strategies import BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord\n'), ((44, 19, 44, 74), 'small_text.integrations.pytorch.query_strategies.ExpectedGradientLength', 'ExpectedGradientLength', (), '', False, 'from small_text.integrations.pytorch.query_strategies import BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord\n'), ((51, 19, 51, 44), 'small_text.integrations.pytorch.query_strategies.ExpectedGradientLength', 'ExpectedGradientLength', ({(51, 42, 51, 43): '2'}, {}), '(2)', False, 'from small_text.integrations.pytorch.query_strategies import BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord\n'), ((59, 19, 59, 64), 'small_text.integrations.pytorch.query_strategies.ExpectedGradientLengthMaxWord', 'ExpectedGradientLengthMaxWord', ({(59, 49, 59, 50): '2', (59, 52, 59, 63): '"""embedding"""'}, {}), "(2, 'embedding')", False, 'from small_text.integrations.pytorch.query_strategies import BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord\n'), ((67, 19, 67, 94), 'small_text.integrations.pytorch.query_strategies.ExpectedGradientLengthMaxWord', 'ExpectedGradientLengthMaxWord', (), '', False, 'from small_text.integrations.pytorch.query_strategies import BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord\n')]
stonewell/pymterm
pymterm/colour/tango.py
af36656d5f7fb008533178d14b00d83d72ba00cf
# Tango terminal colour palette. Each entry packs three 16-bit colour
# channels as 'rrrrggggbbbb' in hex; only the high byte of each channel
# is used when converting to 8-bit RGB.
TANGO_PALLETE = [
    '2e2e34343636',
    'cccc00000000',
    '4e4e9a9a0606',
    'c4c4a0a00000',
    '34346565a4a4',
    '757550507b7b',
    '060698989a9a',
    'd3d3d7d7cfcf',
    '555557575353',
    'efef29292929',
    '8a8ae2e23434',
    'fcfce9e94f4f',
    '72729f9fcfcf',
    'adad7f7fa8a8',
    '3434e2e2e2e2',
    'eeeeeeeeecec',
    ]

def parse_tango_color(c):
    # Keep the high byte of each 16-bit channel and return [R, G, B, A].
    r = int(c[:4][:2], 16)
    g = int(c[4:8][:2], 16)
    b = int(c[8:][:2], 16)

    return [r, g, b, 0xFF]

def apply_color(cfg, color_table):
    # Foreground and cursor use the palette's light grey; the background is a
    # dark grey that is not part of the 16-colour palette itself.
    cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec')
    cfg.default_background_color = parse_tango_color('323232323232')
    cfg.default_cursor_color = cfg.default_foreground_color

    # Overwrite the first len(TANGO_PALLETE) entries of the terminal colour table.
    for i in range(len(TANGO_PALLETE)):
        if i < len(color_table):
            color_table[i] = parse_tango_color(TANGO_PALLETE[i])
[]
voegtlel/auth-manager-backend
user_manager/oauth/oauth2.py
20d40de0abc9deeb3fcddd892ffe2e635301917a
from datetime import datetime, timedelta from enum import Enum from typing import List, Optional, Tuple, Dict, Any, Union import time from authlib.common.security import generate_token from authlib.consts import default_json_headers from authlib.oauth2 import ( OAuth2Request, AuthorizationServer as _AuthorizationServer, ResourceProtector as _ResourceProtector, OAuth2Error, HttpRequest, ) from authlib.oauth2.rfc6749 import InvalidClientError from authlib.oauth2.rfc6749.grants import ( AuthorizationCodeGrant as _AuthorizationCodeGrant, RefreshTokenGrant as _RefreshTokenGrant, BaseGrant, ) from authlib.oauth2.rfc6749.grants import ( ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant, ) from authlib.oauth2.rfc6749.util import scope_to_list from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, \ InsufficientScopeError from authlib.oauth2.rfc8414 import AuthorizationServerMetadata from authlib.oidc.core import UserInfo from authlib.oidc.core.grants import ( OpenIDCode as _OpenIDCode, OpenIDImplicitGrant as _OpenIDImplicitGrant, OpenIDHybridGrant as _OpenIDHybridGrant, ) from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token from fastapi import HTTPException from starlette.concurrency import run_in_threadpool from starlette.responses import Response, JSONResponse from user_manager.common.config import config from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, \ UserPropertyType from user_manager.common.mongo import authorization_code_collection, token_collection, \ client_collection, client_user_cache_collection, user_group_collection, async_token_collection, \ async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema from . import oauth2_key from .user_helper import UserWithRoles USERS_SCOPE = '*users' class TypedRequest(OAuth2Request): user: UserWithRoles credential: Union[DbAuthorizationCode, DbToken] client: DbClient class RedirectResponse(Response): def to_json_response(self) -> JSONResponse: return JSONResponse( content={'redirect_uri': self.headers['Location']}, status_code=200, headers=dict(default_json_headers), ) class ErrorJSONResponse(JSONResponse): pass class ErrorRedirectResponse(RedirectResponse): def to_json_response(self) -> JSONResponse: return ErrorJSONResponse( content={'redirect_uri': self.headers['Location']}, status_code=401, headers=dict(default_json_headers), ) class AuthorizationServer(_AuthorizationServer): metadata_class = AuthorizationServerMetadata def create_oauth2_request(self, request: TypedRequest): assert isinstance(request, OAuth2Request) return request def create_json_request(self, request): assert isinstance(request, HttpRequest) raise NotImplementedError() # TODO: Create HttpRequest with json in body. 
def handle_response(self, status_code: int, payload: Optional[dict], headers: List[Tuple[str, str]]): headers = dict(headers) if isinstance(payload, dict): return JSONResponse(payload, status_code=status_code, headers=headers) elif headers.get('Location'): assert not payload return RedirectResponse(status_code=status_code, headers=headers) assert False def handle_error_response(self, request: TypedRequest, error: OAuth2Error): status_code, body, headers = error( translations=self.get_translations(request), error_uris=self.get_error_uris(request) ) headers = dict(headers) if isinstance(body, dict): return ErrorJSONResponse( content=body, status_code=status_code, headers=headers, ) elif headers.get('Location'): assert not body return ErrorRedirectResponse( status_code=status_code, headers=headers, ) assert False def save_authorization_code(code: str, request: TypedRequest): nonce = request.data.get('nonce') item = DbAuthorizationCode( code=code, client_id=request.client.id, redirect_uri=request.redirect_uri, scope=request.scope, user_id=request.user.user.id, nonce=nonce, auth_time=int(time.time()), expiration_time=datetime.utcnow() + timedelta(seconds=config.oauth2.token_expiration.authorization_code), ) authorization_code_collection.insert_one(item.document()) return item class ExistsNonceMixin(object): def exists_nonce(self, nonce: str, request: TypedRequest): # exists = mongo.authorization_code_collection.count_documents( # {'client_id': request.client_id, 'nonce': nonce}, # limit=1, # ) mod_result = authorization_code_collection.update_one( {'client_id': request.client_id, 'nonce': nonce}, {'$set': {'nonce': None}}, ) if mod_result.modified_count != 1: return False return True class JwtConfigMixin(object): jwt_token_expiration: int def get_jwt_config(self, *args, **kwargs): return { 'key': oauth2_key.key.key, 'alg': oauth2_key.key.jwk.alg.value, 'iss': config.oauth2.issuer, 'exp': self.jwt_token_expiration, } class UserInfoMixin(object): def _translate_properties( self, scope: str, schema: DbManagerSchema, ) -> List[Tuple[str, DbUserProperty, Optional[str], Optional[bool]]]: scope_list = ['*'] + scope_to_list(scope) return [ (prop.valid_key, schema.properties_by_key[prop.user_property], prop.group_type, prop.group_by_name) for scope_name in scope_list if scope_name not in ('openid', 'offline_access') and scope_name in schema.scopes_by_key for prop in schema.scopes_by_key[scope_name].properties if prop.user_property in schema.properties_by_key ] def generate_user_info(self, user: UserWithRoles, scope: str): user_data = { 'roles': user.roles, } for key, prop, group_type, group_by_name in self._translate_properties(scope, read_schema()): if not hasattr(user.user, prop.key): continue value = getattr(user.user, prop.key, None) if prop.type == UserPropertyType.picture: if value is not None: value = f"{config.oauth2.base_url}/picture/{value}" elif prop.type == UserPropertyType.groups: group_filter = {} if group_type is None else {'group_type': group_type} value = [ group['group_name'] if group_by_name else group['_id'] for group in user_group_collection.find( {'_id': {'$in': value}, 'visible': True, **group_filter}, projection={'group_name' if group_by_name else '_id': 1} ) ] elif prop.type in ( UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token ): continue user_data[key] = value return UserInfo(**user_data) async def async_generate_user_info(self, user: UserWithRoles, scope: str): user_data = { 'roles': user.roles, } for key, prop, group_type, group_by_name in 
self._translate_properties(scope, await async_read_schema()): if not hasattr(user.user, prop.key): continue value = getattr(user.user, prop.key, None) if prop.type == UserPropertyType.picture: if value is not None: value = f"{config.oauth2.base_url}/picture/{value}" elif prop.type == UserPropertyType.groups: group_filter = {} if group_type is None else {'group_type': group_type} value = [ group['group_name'] if group_by_name else group['_id'] async for group in async_user_group_collection.find( {'_id': {'$in': value}, 'visible': True, **group_filter}, projection={'group_name' if group_by_name else '_id': 1} ) ] elif prop.type in ( UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token ): continue user_data[key] = value return UserInfo(**user_data) class AuthorizationCodeGrant(_AuthorizationCodeGrant): TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic', 'client_secret_post'] AUTHORIZATION_CODE_LENGTH = config.oauth2.authorization_code_length def save_authorization_code(self, code: str, request: TypedRequest): return save_authorization_code(code, request) def query_authorization_code(self, code: str, client: DbClient): auth_code_data = authorization_code_collection.find_one({'_id': code, 'client_id': client.id}) if auth_code_data is None: return None auth_code = DbAuthorizationCode.validate_document(auth_code_data) if auth_code.is_expired(): return None return auth_code def delete_authorization_code(self, authorization_code: DbAuthorizationCode): authorization_code_collection.delete_one({'_id': authorization_code.code}) def authenticate_user(self, authorization_code: DbAuthorizationCode): return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id) class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant): def authenticate_token_endpoint_client(self): # Must override this to set the client in the request, to make it available to authenticate_user client = super(self).authenticate_token_endpoint_client() self.request.client = client return client def authenticate_user(self, username: str, password: str): user_data = user_collection.find_one({'email': username, 'access_tokens.token': password, 'active': True}) if user_data is None: return None return UserWithRoles.load_groups(DbUser.validate_document(user_data), self.client.id) class OpenIDCode(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDCode): jwt_token_expiration = config.oauth2.token_expiration.authorization_code class OpenIDImplicitGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDImplicitGrant): jwt_token_expiration = config.oauth2.token_expiration.implicit class OpenIDHybridGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDHybridGrant): jwt_token_expiration = config.oauth2.token_expiration.implicit def generate_authorization_code(self) -> str: return generate_token(config.oauth2.authorization_code_length) def save_authorization_code(self, code: str, request: TypedRequest): return save_authorization_code(code, request) class RefreshTokenGrant(_RefreshTokenGrant): TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic'] INCLUDE_NEW_REFRESH_TOKEN = True def authenticate_refresh_token(self, refresh_token: str): token_data = token_collection.find_one({'refresh_token': refresh_token}) if token_data is None: return None auth_code = DbToken.validate_document(token_data) if auth_code.is_expired(): return None return auth_code def authenticate_user(self, credential: DbToken): return UserWithRoles.load(credential.user_id, 
credential.client_id) def revoke_old_credential(self, credential: DbToken): # token_collection.update_one({'_id': credential.access_token}, {'revoked': True}) token_collection.delete_one({'_id': credential.access_token}) def save_token(token: Dict[str, Any], request: TypedRequest): if request.user: user_id = request.user.user.id else: user_id = None now = int(time.time()) token_data = DbToken.validate_document({ 'client_id': request.client.id, 'user_id': user_id, 'issued_at': now, 'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)), 'scope': request.scope, 'auth_time': request.credential.get_auth_time(), **token }) token_collection.insert_one(token_data.document()) return token_data def query_client(client_id: str): client_data = client_collection.find_one({'_id': client_id}) if client_data is None: return None return DbClient.validate_document(client_data) async def async_query_client(client_id: str): client_data = await async_client_collection.find_one({'_id': client_id}) if client_data is None: return None return DbClient.validate_document(client_data) def token_generator(*_): return generate_token(config.oauth2.token_length) class AccessTokenGenerator(UserInfoMixin, JwtConfigMixin): jwt_token_expiration = config.oauth2.token_expiration.authorization_code def __call__(self, client: DbClient, grant_type: str, user: UserWithRoles, scope: str): jwt_config = self.get_jwt_config() jwt_config['aud'] = [client.get_client_id()] jwt_config['auth_time'] = int(time.time()) user_info = {'sub': user.user.id, 'roles': user.roles} if 'groups' in scope_to_list(scope): user_info['groups'] = user.user.groups return generate_id_token({}, user_info, code=generate_token(config.oauth2.access_token_length), **jwt_config) def token_expires_in(_, grant_type: str): return getattr(config.oauth2.token_expiration, grant_type) class BearerToken(_BearerToken): def __call__(self, client, grant_type, user=None, scope=None, expires_in=None, include_refresh_token=True): if 'offline_access' not in scope_to_list(scope): include_refresh_token = False return super(BearerToken, self).__call__(client, grant_type, user, scope, expires_in, include_refresh_token) authorization = AuthorizationServer( query_client, save_token, BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator), ) class OpenIDSessionState: def __call__(self, grant: BaseGrant): grant.register_hook('process_token', self.process_token) def process_token(self, grant: BaseGrant, token: dict): scope = token.get('scope') if not scope or not is_openid_scope(scope): # standard authorization code flow return token token['session_state'] = str(grant.request.user.last_modified) return token # support all openid grants authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(), OpenIDSessionState()]) authorization.register_grant(OpenIDImplicitGrant) authorization.register_grant(OpenIDHybridGrant) authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()]) authorization.register_grant(ResourceOwnerPasswordCredentialsGrant) class BearerTokenValidator(_BearerTokenValidator): def authenticate_token(self, token_string: str): token_data = token_collection.find_one({'_id': token_string}) if token_data is None: return None token = DbToken.validate_document(token_data) if client_user_cache_collection.count_documents({ 'client_id': token.client_id, 'user_id': token.user_id, }) != 1: return None return token def request_invalid(self, request: TypedRequest): 
return False def token_revoked(self, token: DbToken): return token.revoked class ResourceProtector(_ResourceProtector): def validate(self, request: OAuth2Request, scope: str = None, scope_operator='AND') -> DbToken: assert isinstance(request, OAuth2Request) return self.validate_request(scope, request, scope_operator) class UserIntrospection(UserInfoMixin): async def create_response(self, request: TypedRequest) -> Response: try: assert isinstance(request, OAuth2Request) request.token = await run_in_threadpool(resource_protector.validate_request, None, request) if request.token is None: raise HTTPException(403, "Invalid token") request.user = await UserWithRoles.async_load(request.token.user_id, request.token.client_id) user_info = await self.async_generate_user_info(request.user, request.token.scope) return JSONResponse(user_info) except OAuth2Error as error: return authorization.handle_error_response(request, error) class RequestOriginVerifier: async def create_response(self, request: TypedRequest, origin: str) -> Optional[Response]: try: assert isinstance(request, OAuth2Request) request.token = await run_in_threadpool(resource_protector.validate_request, None, request) if request.token is None: raise HTTPException(403, "Invalid token") request.client = await async_query_client(request.token.client_id) if request.client is None: raise HTTPException(403, "Invalid client in token") if not request.client.check_redirect_uri(origin): raise HTTPException(403, "Allowed redirect uri does not match request") return None except OAuth2Error as error: return authorization.handle_error_response(request, error) class OtherUserInspection(UserInfoMixin): async def create_response(self, request: TypedRequest, user_id: str, client_auth: dict = None) -> Response: try: assert isinstance(request, OAuth2Request) if request.client is None: request.token = await run_in_threadpool(resource_protector.validate_request, None, request) if request.token is None: raise HTTPException(403, "Invalid token") client_id = request.token.client_id scopes = request.token.scope scope = USERS_SCOPE else: client_id = request.client_id scopes = request.client.allowed_scope scope = scopes if USERS_SCOPE not in scope_to_list(scopes): raise InsufficientScopeError('Missing "*users" scope', request.uri) user = await UserWithRoles.async_load(user_id, client_id) if user is None: raise HTTPException(404, "User not found") user_info = await self.async_generate_user_info(user, scope) return JSONResponse(user_info) except OAuth2Error as error: return authorization.handle_error_response(request, error) class OtherUsersInspection(UserInfoMixin): async def create_response(self, request: TypedRequest) -> Response: try: assert isinstance(request, OAuth2Request) if request.client is None: request.token = await run_in_threadpool(resource_protector.validate_request, None, request) if request.token is None: raise HTTPException(403, "Invalid token") client_id = request.token.client_id scopes = request.token.scope scope = USERS_SCOPE load_roles = False else: client_id = request.client_id scopes = request.client.allowed_scope scope = scopes load_roles = True if USERS_SCOPE not in scope_to_list(scopes): raise InsufficientScopeError('Missing "*users" scope', request.uri) user_infos = [] for user in await UserWithRoles.async_load_all(client_id, load_roles=load_roles): user_info = await self.async_generate_user_info(user, scope) if not load_roles: del user_info['roles'] user_infos.append(user_info) return JSONResponse(user_infos) except OAuth2Error as error: 
return authorization.handle_error_response(request, error) class TypeHint(str, Enum): AccessToken = "access_token" RefreshToken = "refresh_token" class RevocationEndpoint: async def create_response( self, raw_token: str, token_type_hint: Optional[TypeHint], request: TypedRequest ) -> Response: token_data = None if token_type_hint is None or token_type_hint == TypeHint.AccessToken: token_data = await async_token_collection.find_one({'_id': raw_token}) if token_data is None and (token_type_hint is None or token_type_hint == TypeHint.RefreshToken): token_data = await async_token_collection.find_one({'refresh_token': raw_token}) if token_data is None: return Response() token = DbToken.validate_document(token_data) try: if request.client_id is None: request.data['client_id'] = token.client_id elif token.client_id != request.client_id: raise InvalidClientError(state=request.state, status_code=401) await run_in_threadpool( authorization.authenticate_client, request, ["none", "client_secret_basic", "client_secret_post"] ) # await async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}}) # token_collection.update_one({'_id': credential.access_token}, {'revoked': True}) await async_token_collection.delete_one({'_id': token.access_token}) return Response() except OAuth2Error as error: return authorization.handle_error_response(request, error) resource_protector = ResourceProtector() resource_protector.register_token_validator(BearerTokenValidator()) user_introspection = UserIntrospection() token_revocation = RevocationEndpoint() request_origin_verifier = RequestOriginVerifier() other_user_inspection = OtherUserInspection() other_users_inspection = OtherUsersInspection()
[((331, 18, 331, 64), 'user_manager.common.mongo.client_collection.find_one', 'client_collection.find_one', ({(331, 45, 331, 63): "{'_id': client_id}"}, {}), "({'_id': client_id})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((334, 11, 334, 50), 'user_manager.common.models.DbClient.validate_document', 'DbClient.validate_document', ({(334, 38, 334, 49): 'client_data'}, {}), '(client_data)', False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((341, 11, 341, 50), 'user_manager.common.models.DbClient.validate_document', 'DbClient.validate_document', ({(341, 38, 341, 49): 'client_data'}, {}), '(client_data)', False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((345, 11, 345, 53), 'authlib.common.security.generate_token', 'generate_token', ({(345, 26, 345, 52): 'config.oauth2.token_length'}, {}), '(config.oauth2.token_length)', False, 'from authlib.common.security import generate_token\n'), ((143, 21, 146, 9), 'user_manager.common.mongo.authorization_code_collection.update_one', 'authorization_code_collection.update_one', ({(144, 12, 144, 60): "{'client_id': request.client_id, 'nonce': nonce}", (145, 12, 145, 37): "{'$set': {'nonce': None}}"}, {}), "({'client_id': request.client_id,\n 'nonce': nonce}, {'$set': {'nonce': None}})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((204, 15, 204, 36), 'authlib.oidc.core.UserInfo', 'UserInfo', ({}, {}), '(**user_data)', False, 'from authlib.oidc.core import UserInfo\n'), ((231, 15, 231, 36), 'authlib.oidc.core.UserInfo', 'UserInfo', ({}, {}), '(**user_data)', False, 'from authlib.oidc.core import UserInfo\n'), ((242, 25, 242, 102), 'user_manager.common.mongo.authorization_code_collection.find_one', 'authorization_code_collection.find_one', ({(242, 64, 242, 101): "{'_id': code, 'client_id': client.id}"}, {}), "({'_id': code, 'client_id': client.id})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((245, 20, 245, 73), 'user_manager.common.models.DbAuthorizationCode.validate_document', 'DbAuthorizationCode.validate_document', ({(245, 58, 245, 72): 'auth_code_data'}, {}), '(auth_code_data)', False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((251, 8, 251, 82), 'user_manager.common.mongo.authorization_code_collection.delete_one', 'authorization_code_collection.delete_one', ({(251, 49, 251, 81): "{'_id': authorization_code.code}"}, {}), "({'_id': authorization_code.code})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, 
user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((266, 20, 266, 114), 'user_manager.common.mongo.user_collection.find_one', 'user_collection.find_one', ({(266, 45, 266, 113): "{'email': username, 'access_tokens.token': password, 'active': True}"}, {}), "({'email': username, 'access_tokens.token':\n password, 'active': True})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((284, 15, 284, 70), 'authlib.common.security.generate_token', 'generate_token', ({(284, 30, 284, 69): 'config.oauth2.authorization_code_length'}, {}), '(config.oauth2.authorization_code_length)', False, 'from authlib.common.security import generate_token\n'), ((295, 21, 295, 80), 'user_manager.common.mongo.token_collection.find_one', 'token_collection.find_one', ({(295, 47, 295, 79): "{'refresh_token': refresh_token}"}, {}), "({'refresh_token': refresh_token})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((298, 20, 298, 57), 'user_manager.common.models.DbToken.validate_document', 'DbToken.validate_document', ({(298, 46, 298, 56): 'token_data'}, {}), '(token_data)', False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((308, 8, 308, 69), 'user_manager.common.mongo.token_collection.delete_one', 'token_collection.delete_one', ({(308, 36, 308, 68): "{'_id': credential.access_token}"}, {}), "({'_id': credential.access_token})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((316, 14, 316, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((338, 24, 338, 76), 'user_manager.common.mongo.async_client_collection.find_one', 'async_client_collection.find_one', ({(338, 57, 338, 75): "{'_id': client_id}"}, {}), "({'_id': client_id})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((406, 21, 406, 69), 'user_manager.common.mongo.token_collection.find_one', 'token_collection.find_one', ({(406, 47, 406, 68): "{'_id': token_string}"}, {}), "({'_id': token_string})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((409, 16, 409, 53), 'user_manager.common.models.DbToken.validate_document', 'DbToken.validate_document', ({(409, 42, 409, 52): 'token_data'}, {}), '(token_data)', False, 
'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((534, 16, 534, 53), 'user_manager.common.models.DbToken.validate_document', 'DbToken.validate_document', ({(534, 42, 534, 52): 'token_data'}, {}), '(token_data)', False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((94, 19, 94, 82), 'starlette.responses.JSONResponse', 'JSONResponse', (), '', False, 'from starlette.responses import Response, JSONResponse\n'), ((170, 29, 170, 49), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', ({(170, 43, 170, 48): 'scope'}, {}), '(scope)', False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((183, 86, 183, 99), 'user_manager.common.mongo.read_schema', 'read_schema', ({}, {}), '()', False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((269, 41, 269, 76), 'user_manager.common.models.DbUser.validate_document', 'DbUser.validate_document', ({(269, 66, 269, 75): 'user_data'}, {}), '(user_data)', False, 'from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, UserPropertyType\n'), ((354, 38, 354, 49), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((357, 23, 357, 43), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', ({(357, 37, 357, 42): 'scope'}, {}), '(scope)', False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((369, 35, 369, 55), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', ({(369, 49, 369, 54): 'scope'}, {}), '(scope)', False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((410, 11, 413, 10), 'user_manager.common.mongo.client_user_cache_collection.count_documents', 'client_user_cache_collection.count_documents', ({(410, 56, 413, 9): "{'client_id': token.client_id, 'user_id': token.user_id}"}, {}), "({'client_id': token.client_id,\n 'user_id': token.user_id})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((439, 19, 439, 42), 'starlette.responses.JSONResponse', 'JSONResponse', ({(439, 32, 439, 41): 'user_info'}, {}), '(user_info)', False, 'from starlette.responses import Response, JSONResponse\n'), ((482, 19, 482, 42), 'starlette.responses.JSONResponse', 'JSONResponse', ({(482, 32, 482, 41): 'user_info'}, {}), '(user_info)', False, 'from starlette.responses import Response, JSONResponse\n'), ((512, 19, 512, 43), 'starlette.responses.JSONResponse', 'JSONResponse', ({(512, 32, 512, 42): 'user_infos'}, {}), '(user_infos)', False, 'from starlette.responses import Response, JSONResponse\n'), ((533, 19, 533, 29), 'starlette.responses.Response', 'Response', ({}, {}), '()', False, 'from starlette.responses import Response, JSONResponse\n'), ((546, 19, 546, 29), 'starlette.responses.Response', 'Response', ({}, {}), '()', False, 'from starlette.responses import Response, JSONResponse\n'), ((130, 22, 130, 33), 'time.time', 'time.time', ({}, {}), '()', False, 'import 
time\n'), ((131, 24, 131, 41), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((131, 44, 131, 112), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((210, 92, 210, 111), 'user_manager.common.mongo.async_read_schema', 'async_read_schema', ({}, {}), '()', False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((321, 27, 321, 44), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((359, 53, 359, 102), 'authlib.common.security.generate_token', 'generate_token', ({(359, 68, 359, 101): 'config.oauth2.access_token_length'}, {}), '(config.oauth2.access_token_length)', False, 'from authlib.common.security import generate_token\n'), ((388, 28, 388, 50), 'authlib.oidc.core.grants.util.is_openid_scope', 'is_openid_scope', ({(388, 44, 388, 49): 'scope'}, {}), '(scope)', False, 'from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token\n'), ((434, 34, 434, 103), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', ({(434, 52, 434, 87): 'resource_protector.validate_request', (434, 89, 434, 93): 'None', (434, 95, 434, 102): 'request'}, {}), '(resource_protector.validate_request, None, request)', False, 'from starlette.concurrency import run_in_threadpool\n'), ((436, 22, 436, 57), 'fastapi.HTTPException', 'HTTPException', ({(436, 36, 436, 39): '(403)', (436, 41, 436, 56): '"""Invalid token"""'}, {}), "(403, 'Invalid token')", False, 'from fastapi import HTTPException\n'), ((448, 34, 448, 103), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', ({(448, 52, 448, 87): 'resource_protector.validate_request', (448, 89, 448, 93): 'None', (448, 95, 448, 102): 'request'}, {}), '(resource_protector.validate_request, None, request)', False, 'from starlette.concurrency import run_in_threadpool\n'), ((450, 22, 450, 57), 'fastapi.HTTPException', 'HTTPException', ({(450, 36, 450, 39): '(403)', (450, 41, 450, 56): '"""Invalid token"""'}, {}), "(403, 'Invalid token')", False, 'from fastapi import HTTPException\n'), ((453, 22, 453, 67), 'fastapi.HTTPException', 'HTTPException', ({(453, 36, 453, 39): '(403)', (453, 41, 453, 66): '"""Invalid client in token"""'}, {}), "(403, 'Invalid client in token')", False, 'from fastapi import HTTPException\n'), ((455, 22, 455, 87), 'fastapi.HTTPException', 'HTTPException', ({(455, 36, 455, 39): '(403)', (455, 41, 455, 86): '"""Allowed redirect uri does not match request"""'}, {}), "(403, 'Allowed redirect uri does not match request')", False, 'from fastapi import HTTPException\n'), ((476, 34, 476, 55), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', ({(476, 48, 476, 54): 'scopes'}, {}), '(scopes)', False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((477, 22, 477, 83), 'authlib.oauth2.rfc6750.InsufficientScopeError', 'InsufficientScopeError', ({(477, 45, 477, 69): '"""Missing "*users" scope"""', (477, 71, 477, 82): 'request.uri'}, {}), '(\'Missing "*users" scope\', request.uri)', False, 'from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, InsufficientScopeError\n'), ((480, 22, 480, 58), 'fastapi.HTTPException', 'HTTPException', ({(480, 
36, 480, 39): '(404)', (480, 41, 480, 57): '"""User not found"""'}, {}), "(404, 'User not found')", False, 'from fastapi import HTTPException\n'), ((504, 34, 504, 55), 'authlib.oauth2.rfc6749.util.scope_to_list', 'scope_to_list', ({(504, 48, 504, 54): 'scopes'}, {}), '(scopes)', False, 'from authlib.oauth2.rfc6749.util import scope_to_list\n'), ((505, 22, 505, 83), 'authlib.oauth2.rfc6750.InsufficientScopeError', 'InsufficientScopeError', ({(505, 45, 505, 69): '"""Missing "*users" scope"""', (505, 71, 505, 82): 'request.uri'}, {}), '(\'Missing "*users" scope\', request.uri)', False, 'from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, InsufficientScopeError\n'), ((529, 31, 529, 82), 'user_manager.common.mongo.async_token_collection.find_one', 'async_token_collection.find_one', ({(529, 63, 529, 81): "{'_id': raw_token}"}, {}), "({'_id': raw_token})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((531, 31, 531, 92), 'user_manager.common.mongo.async_token_collection.find_one', 'async_token_collection.find_one', ({(531, 63, 531, 91): "{'refresh_token': raw_token}"}, {}), "({'refresh_token': raw_token})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((540, 18, 542, 13), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', ({(541, 16, 541, 49): 'authorization.authenticate_client', (541, 51, 541, 58): 'request', (541, 60, 541, 113): "['none', 'client_secret_basic', 'client_secret_post']"}, {}), "(authorization.authenticate_client, request, ['none',\n 'client_secret_basic', 'client_secret_post'])", False, 'from starlette.concurrency import run_in_threadpool\n'), ((545, 18, 545, 80), 'user_manager.common.mongo.async_token_collection.delete_one', 'async_token_collection.delete_one', ({(545, 52, 545, 79): "{'_id': token.access_token}"}, {}), "({'_id': token.access_token})", False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((466, 38, 466, 107), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', ({(466, 56, 466, 91): 'resource_protector.validate_request', (466, 93, 466, 97): 'None', (466, 99, 466, 106): 'request'}, {}), '(resource_protector.validate_request, None, request)', False, 'from starlette.concurrency import run_in_threadpool\n'), ((468, 26, 468, 61), 'fastapi.HTTPException', 'HTTPException', ({(468, 40, 468, 43): '(403)', (468, 45, 468, 60): '"""Invalid token"""'}, {}), "(403, 'Invalid token')", False, 'from fastapi import HTTPException\n'), ((492, 38, 492, 107), 'starlette.concurrency.run_in_threadpool', 'run_in_threadpool', ({(492, 56, 492, 91): 'resource_protector.validate_request', (492, 93, 492, 97): 'None', (492, 99, 492, 106): 'request'}, {}), '(resource_protector.validate_request, None, request)', False, 'from starlette.concurrency import run_in_threadpool\n'), 
((494, 26, 494, 61), 'fastapi.HTTPException', 'HTTPException', ({(494, 40, 494, 43): '(403)', (494, 45, 494, 60): '"""Invalid token"""'}, {}), "(403, 'Invalid token')", False, 'from fastapi import HTTPException\n'), ((539, 22, 539, 78), 'authlib.oauth2.rfc6749.InvalidClientError', 'InvalidClientError', (), '', False, 'from authlib.oauth2.rfc6749 import InvalidClientError\n'), ((194, 33, 197, 21), 'user_manager.common.mongo.user_group_collection.find', 'user_group_collection.find', (), '', False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n'), ((221, 39, 224, 21), 'user_manager.common.mongo.async_user_group_collection.find', 'async_user_group_collection.find', (), '', False, 'from user_manager.common.mongo import authorization_code_collection, token_collection, client_collection, client_user_cache_collection, user_group_collection, async_token_collection, async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema\n')]
claws/adsb
src/adsb/sbs/server.py
4a7d35880dece6baaf24370fab445e2571fc19e9
import asyncio import datetime import logging import socket from . import protocol from typing import Tuple from asyncio import AbstractEventLoop logger = logging.getLogger(__name__) class Server(object): def __init__( self, host: str = "localhost", port: int = 30003, backlog=100, loop: AbstractEventLoop = None, ) -> None: self.loop = loop or asyncio.get_event_loop() self.host = host self._requested_port = port self.port = None self.backlog = backlog self.listener = None self.protocols = {} async def start(self) -> None: """ Start the server """ try: self.listener = await self.loop.create_server( lambda: protocol.SBSServerProtocol(self), self.host, self._requested_port, family=socket.AF_INET, backlog=self.backlog, ) # type: asyncio.Server # Fetch actual port in use. This can be different from the # specified port if the port was passed as 0 which means use # an ephemeral port. assert len(self.listener.sockets) == 1 _, self.port = self.listener.sockets[0].getsockname() except asyncio.CancelledError: logger.exception("Connection waiter Future was cancelled") except Exception: logger.exception("An error occurred in start") async def stop(self) -> None: """ Stop the server """ if self.listener: # Avoid iterating over the protocols dict which may change size # while it is being iterating over. peers = list(self.protocols) for peer in peers: prot = self.protocols.get(peer) if prot: prot.close() self.listener.close() def register_protocol( self, peer: Tuple[str, int], prot: "SBSServerProtocol" ) -> None: """ Register a protocol instance with the server. :param peer: Tuple of (host:str, port:int). :param prot: a SBSServerProtocol instance. """ self.protocols[peer] = prot def deregister_protocol(self, peer: Tuple[str, int]) -> None: """ De-register a protocol instance from the server. This peer will no longer receive messages. :param peer: Tuple of (host:str, port:int). """ del self.protocols[peer] def send_message(self, msg: bytes, peer: Tuple[str, int] = None) -> None: """ Send a message. :param msg: A bytes object representing the SBS format message to send to peers. The message is assumed to include the end of message delimiter. :param peer: A specific peer to send the message to. Peer is a Tuple of (host:str, port:int). If not specified then the message is broadcast to all peers. """ if self.protocols: if peer: prot = self.protocols.get(peer) if prot: prot.send_message(msg) else: raise Exception( f"Server can't send msg to non-existant peer: {peer}" ) else: # broadcast message to all peers for peer, prot in self.protocols.items(): prot.send_message(msg) else: raise Exception("Server can't send msg, no peers available")
[((13, 9, 13, 36), 'logging.getLogger', 'logging.getLogger', ({(13, 27, 13, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((25, 28, 25, 52), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n')]
kandahk/robusta
src/robusta/core/model/events.py
61a2001cb1c4e90e8a74b810463ec99e6cb80787
import logging import uuid from enum import Enum from typing import List, Optional, Dict, Any from dataclasses import dataclass, field from pydantic import BaseModel from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler from ..reporting.base import Finding, BaseBlock class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE = 1 PROMETHEUS = 2 MANUAL_TRIGGER = 3 SCHEDULED_TRIGGER = 4 class ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]] = None # Right now: # 1. this is a dataclass but we need to make all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/ # 2. this can't be a pydantic BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557) # once the pydantic PR that addresses those issues is merged, this should be a pydantic class # (note that we need to integrate with dataclasses because of hikaru) @dataclass class ExecutionBaseEvent: findings: Dict[str, Finding] = field(default_factory=lambda: {}) named_sinks: Optional[List[str]] = None response: Dict[ str, Any ] = None # Response returned to caller. For admission or manual triggers for example stop_processing: bool = False _scheduler: Optional[PlaybooksScheduler] = None def set_scheduler(self, scheduler: PlaybooksScheduler): self._scheduler = scheduler def get_scheduler(self) -> PlaybooksScheduler: return self._scheduler def create_default_finding(self) -> Finding: """Create finding default fields according to the event type""" return Finding(title="Generic Finding", aggregation_key="Generic finding key") def add_enrichment( self, enrichment_blocks: List[BaseBlock], annotations=None, finding_key: str = "DEFAULT", ): finding = self.findings.get(finding_key) if not finding: finding = self.create_default_finding() self.findings[finding_key] = finding finding.add_enrichment(enrichment_blocks, annotations) def add_finding(self, finding: Finding, finding_key: str = None): if ( not finding_key ): # user didn't specify a key, so this finding shouldn't be accessed by key. Randomise it finding_key = str(uuid.uuid4()) existing_finding = self.findings.get(finding_key) if existing_finding: logging.warning( f"Overriding existing finding. finding_key: {finding_key} new finding: {finding}" ) self.findings[finding_key] = finding @staticmethod def from_params(params: ExecutionEventBaseParams) -> Optional["ExecutionBaseEvent"]: return ExecutionBaseEvent(named_sinks=params.named_sinks)
[((31, 35, 31, 68), 'dataclasses.field', 'field', (), '', False, 'from dataclasses import dataclass, field\n'), ((70, 12, 72, 13), 'logging.warning', 'logging.warning', ({(71, 16, 71, 97): 'f"""Overriding existing finding. finding_key: {finding_key} new finding: {finding}"""'}, {}), "(\n f'Overriding existing finding. finding_key: {finding_key} new finding: {finding}'\n )", False, 'import logging\n'), ((66, 30, 66, 42), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n')]
pfrantz/graphene-mongo
examples/django_mongoengine/bike/models.py
f7d4f3e194ec41793e6da547934c34e11fd9ef51
from mongoengine import Document from mongoengine.fields import ( FloatField, StringField, ListField, URLField, ObjectIdField, ) class Shop(Document): meta = {"collection": "shop"} ID = ObjectIdField() name = StringField() address = StringField() website = URLField() class Bike(Document): meta = {"collection": "bike"} ID = ObjectIdField() name = StringField() brand = StringField() year = StringField() size = ListField(StringField()) wheel_size = FloatField() type = StringField()
[((13, 9, 13, 24), 'mongoengine.fields.ObjectIdField', 'ObjectIdField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((14, 11, 14, 24), 'mongoengine.fields.StringField', 'StringField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((15, 14, 15, 27), 'mongoengine.fields.StringField', 'StringField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((16, 14, 16, 24), 'mongoengine.fields.URLField', 'URLField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((21, 9, 21, 24), 'mongoengine.fields.ObjectIdField', 'ObjectIdField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((22, 11, 22, 24), 'mongoengine.fields.StringField', 'StringField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((23, 12, 23, 25), 'mongoengine.fields.StringField', 'StringField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((24, 11, 24, 24), 'mongoengine.fields.StringField', 'StringField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((26, 17, 26, 29), 'mongoengine.fields.FloatField', 'FloatField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((27, 11, 27, 24), 'mongoengine.fields.StringField', 'StringField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n'), ((25, 21, 25, 34), 'mongoengine.fields.StringField', 'StringField', ({}, {}), '()', False, 'from mongoengine.fields import FloatField, StringField, ListField, URLField, ObjectIdField\n')]
jedhsu/tensor
src/tensor/tensor/movement/__init__.py
3b2fe21029fa7c50b034190e77d79d1a94ea5e8f
from ._movement import Movement from .path import MovementPath from .paths import MovementPaths
[]
sauloal/ipython
opticalmapping/standalone/om_augmenter.py
35c24a10330da3e54b5ee29df54ee263f5268d18
#!/usr/bin/python import os import sys from om_shared import * def parse_args(args): parser = argparse.ArgumentParser(description="Bionano Genomics MAP parser") parser.add_argument( 'infile', help="MAP file" ) parser.add_argument( '-g' , '--count' , action='store_false', help="DO NOT perform global count" ) parser.add_argument( '-c' , '--conf' , action='store_false', help="DO NOT perform confidence stats" ) args = parser.parse_args(args=args) return args def main(args): valid_fields = gen_valid_fields(valid_fields_g) infile = args.infile DO_GLOBAL_COUNT = args.count DO_CONFIDENCE_STATS = args.conf oufile = infile + ".augmented.tsv" if not os.path.exists(infile): print "input file %s does not exists" % infile sys.exit(1) if os.path.isdir(infile): print "input file %s is a folder" % infile sys.exit(1) print "saving to %s" % oufile data, headers, names, seman, types, indexer, groups, ref_maps_from, query_maps_from, filters_csv = parse_file(infile, valid_fields) print "NAMES" , names print "TYPES" , types #print "HEADERS", "\n".join( headers ) #print "DATA" , data[1] #print "INDEX", indexer.keys()[0], indexer[indexer.keys()[0]] print "file has %5d maps and %3d chromosomes" % (len(indexer["QryContigID"]), len(indexer["RefContigID"])) if DO_GLOBAL_COUNT: print "PRINTING GLOBAL COUNT" for RefContigID in sorted(groups["RefContigID_QryContigID"]): print "chromosome %2d has %4d maps" % ( RefContigID, len(groups["RefContigID_QryContigID"][RefContigID])) print if DO_CONFIDENCE_STATS: print "PRINTING CONFIDENCE STATS" for QryContigID in sorted(groups["QryContigID_RefContigID"]): print "query %5d maps to %2d chromosomes" % (QryContigID, len(groups["QryContigID_RefContigID"][QryContigID])) XmapEntryIDs = groups["QryContigID_XmapEntryID"][QryContigID].keys() Confidences = [groups["XmapEntryID_Confidence"][x].keys()[0] for x in XmapEntryIDs] print " confidences ", Confidences max_confidence = max(Confidences) print " max confidence ", max_confidence print " max confidence chrom", data[list(groups["XmapEntryID_Confidence"][XmapEntryIDs[Confidences.index(max_confidence)]][max_confidence])[0]][seman["RefContigID"]] print print "CREATING REPORT:", oufile data = [ KeyedTuple(x, labels=names)._asdict() for x in data ] with open(oufile, "w") as reporter: reporter.write("\n".join(headers[:-2]) + "\n#\n") reporter.write("# FIELDS:\n") reporter.write( "\n".join( [ "# %-39s: %s" % ( x, valid_fields['helps_t'][x] ) for x in valid_fields['names' ] ] ) + "\n#\n") reporter.write("#h " + "\t".join( [ "%-39s" % ( x ) for x in valid_fields['names' ] ] ) + "\n" ) reporter.write("#f " + "\t".join( [ "%-39s" % ( valid_fields['types' ][x] ) for x in valid_fields['names' ] ] ) + "\n" ) for RefContigID in sorted(groups["RefContigID_QryContigID"]): QryContigIDs = groups["RefContigID_QryContigID"][RefContigID] for QryContigID in sorted(QryContigIDs): data_poses = list(groups["RefContigID_QryContigID"][RefContigID][QryContigID]) all_data_poses = list(indexer["QryContigID"][QryContigID]) data_vals = [ data[x] for x in data_poses ] stats = stats_from_data_vals(RefContigID, QryContigID, groups, indexer, data, data_vals, all_data_poses) #print "RefContigID %4d QryContigID %6d" % ( RefContigID, QryContigID ) for data_val in data_vals: cigar = data_val["HitEnum"] cigar_matches, cigar_insertions, cigar_deletions = process_cigar(cigar) Alignment = data_val["Alignment"] alignment_count_queries, alignment_count_refs, alignment_count_refs_colapses, alignment_count_queries_colapses = process_alignment(Alignment) for stat in stats: 
data_val[stat] = stats[stat] data_val["_meta_alignment_count_queries" ] = alignment_count_queries data_val["_meta_alignment_count_queries_colapses" ] = alignment_count_refs_colapses data_val["_meta_alignment_count_refs" ] = alignment_count_refs data_val["_meta_alignment_count_refs_colapses" ] = alignment_count_queries_colapses data_val["_meta_cigar_deletions" ] = cigar_deletions data_val["_meta_cigar_insertions" ] = cigar_insertions data_val["_meta_cigar_matches" ] = cigar_matches data_val["_meta_proportion_query_len_gapped" ] = (data_val['_meta_len_qry_match_gapped'] * 1.0)/ data_val["QryLen"] data_val["_meta_proportion_query_len_no_gap" ] = (data_val['_meta_len_qry_match_no_gap'] * 1.0)/ data_val["QryLen"] #print " ", " ".join( ["%s %s" % (x, str(data_val[x])) for x in sorted(data_val)] ) reporter.write( "\t".join( [ str(data_val[x]) for x in valid_fields['names' ] ] ) + "\n" ) if __name__ == '__main__': if len(sys.argv) ==1: print "no arguments given" sys.exit(1) args = parse_args(sys.argv[1:]) main(args) """ # $ cd D:\Plextor\data\Acquisitie\BioNanoGenomics\MyLycopersicumWorkspace_31022015\Imports; C:\Program Files\BioNano Genomics\RefAligner\WindowsRefAligner.exe -f -ref D:\Plextor\data\Acquisitie\BioNanoGenomics\MyLycopersicumWorkspace_31022015\Imports\S_lycopersicum_chromosomes.2.50.BspQI-BbvCI.cmap -i D:\Plextor\data\Acquisitie\BioNanoGenomics\MyLycopersicumWorkspace_31022015\Imports\EXP_REFINEFINAL1.cmap -o S_lycopersicum_chromosomes.2.50.BspQI-BbvCI_to_EXP_REFINEFINAL1 -endoutlier 1e-2 -outlier 1e-4 -extend 1 -FN 0.08 -FP 0.8 -sf 0.2 -sd 0 -sr 0.02 -res 2.9 -resSD 0.7 -mres 2.0 -A 5 -biaswt 0 -M 1 -Mfast 0 -maxmem 2 -T 1e-6 -stdout -stderr # r3498 $Header: http://svn.bnm.local:81/svn/informatics/RefAligner/branches/3480/RefAligner.cpp 3470 2014-12-17 19:29:21Z tanantharaman $ # FLAGS: USE_SSE=0 USE_AVX=0 USE_MIC=0 USE_PFLOAT=1 USE_RFLOAT=1 DEBUG=1 VERB=1 # XMAP File Version: 0.2 # Label Channels: 1 # Reference Maps From: S_lycopersicum_chromosomes.2.50.BspQI-BbvCI_to_EXP_REFINEFINAL1_r.cmap # Query Maps From: S_lycopersicum_chromosomes.2.50.BspQI-BbvCI_to_EXP_REFINEFINAL1_q.cmap #h XmapEntryID QryContigID RefContigID QryStartPos QryEndPos RefStartPos RefEndPos Orientation Confidence HitEnum QryLen RefLen LabelChannel Alignment #f int int int float float float float string float string float float int string 1 141 1 528400.6 571697.5 10672 54237.5 + 6.65 4M2D2M 1439123.5 21805821 1 "(1,34)(2,34)(3,35)(4,36)(5,37)(6,38)(8,38)(9,39)" 2 174 1 21236.5 1568390 10672 1553561 + 79.35 2M3D1M1D1M1D4M1I2M1D2M1D1M2I2D9M3I3M1D6M1D2M2D1M1D6M1D1M1D1M2D2M2D1M1I1D1M1D5M2D4M2D1M2D2M1D2M1D3M1D1M1D2M3I3D1M1D1M3D2M3D1M2I1D1M2D1M1D1M1I2D3M2I1M1D2M1D1M1D1M2I3D3M3D1M2D1M1D1M1D5M2D12M 1568410 21805821 1 
"(1,2)(2,2)(3,3)(6,4)(7,4)(9,5)(11,6)(12,7)(13,8)(14,9)(15,11)(16,12)(18,13)(19,14)(20,15)(21,15)(24,18)(25,19)(26,20)(27,21)(28,22)(29,23)(30,24)(31,25)(32,26)(33,30)(34,31)(35,32)(37,33)(38,34)(39,35)(40,36)(41,37)(42,38)(44,39)(45,40)(47,41)(48,41)(50,42)(51,43)(52,44)(53,45)(54,46)(55,47)(57,48)(59,49)(60,50)(62,50)(63,51)(66,52)(68,54)(69,55)(70,55)(71,56)(72,57)(73,58)(74,59)(76,60)(77,60)(78,61)(79,62)(80,63)(82,64)(83,64)(86,65)(87,66)(89,67)(90,68)(92,69)(93,70)(94,71)(95,72)(96,72)(98,73)(99,74)(103,78)(105,79)(109,80)(110,81)(111,82)(114,82)(116,85)(119,86)(120,87)(121,87)(124,89)(125,90)(126,91)(127,94)(128,95)(129,95)(130,96)(132,97)(134,98)(138,101)(139,102)(140,103)(143,104)(144,104)(146,105)(147,105)(149,106)(151,107)(152,108)(153,109)(154,110)(155,111)(158,112)(159,113)(160,114)(161,115)(162,116)(163,117)(164,118)(165,119)(166,120)(167,121)(168,122)(169,123)" """
[]
OpenIxia/ixnetwork_restpy
uhd_restpy/testplatform/sessions/ixnetwork/impairment/profile/fixedclassifier/fixedclassifier.py
f628db450573a104f327cf3c737ca25586e067ae
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union


class FixedClassifier(Base):
    """Specifies the packets to apply this profile to. If there are multiple patterns enabled, they are ANDed: each packet must match all patterns in order to be impaired by this profile.

    The FixedClassifier class encapsulates a list of fixedClassifier resources that are managed by the user.
    A list of resources can be retrieved from the server using the FixedClassifier.find() method.
    The list can be managed by using the FixedClassifier.add() and FixedClassifier.remove() methods.
    """
    __slots__ = ()
    _SDM_NAME = 'fixedClassifier'
    _SDM_ATT_MAP = {
    }
    _SDM_ENUM_MAP = {
    }

    def __init__(self, parent, list_op=False):
        super(FixedClassifier, self).__init__(parent, list_op)

    @property
    def Pattern(self):
        """
        Returns
        -------
        - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern
        if self._properties.get('Pattern', None) is not None:
            return self._properties.get('Pattern')
        else:
            return Pattern(self)

    def add(self):
        """Adds a new fixedClassifier resource on the server and adds it to the container.

        Returns
        -------
        - self: This instance with all currently retrieved fixedClassifier resources using find and the newly added fixedClassifier resources available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

    def remove(self):
        """Deletes all the contained fixedClassifier resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(self):
        """Finds and retrieves fixedClassifier resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve fixedClassifier resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all fixedClassifier resources from the server.

        Returns
        -------
        - self: This instance with matching fixedClassifier resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of fixedClassifier data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the fixedClassifier resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
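# Hypothetical usage sketch, not part of the generated module. The session
# object and the Impairment/Profile parent chain are assumptions made for
# illustration; only add(), find(), remove() and the Pattern property are
# taken from the class above.
#
#   profile = ixnetwork.Impairment.Profile.add()    # assumed parent chain
#   classifier = profile.FixedClassifier.add()      # create one classifier on the server
#   pattern = classifier.Pattern                    # per-classifier match pattern container
#   found = profile.FixedClassifier.find()          # retrieve existing classifiers
#   found.remove()                                  # delete them from the server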
[((59, 19, 59, 32), 'uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern', 'Pattern', ({(59, 27, 59, 31): 'self'}, {}), '(self)', False, 'from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern\n')]