hexsha (string, len 40) | size (int64, 1-1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 3-239) | max_stars_repo_name (string, len 5-130) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (sequence, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3-239) | max_issues_repo_name (string, len 5-130) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (sequence, len 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3-239) | max_forks_repo_name (string, len 5-130) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (sequence, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 1-1.03M) | avg_line_length (float64, 1-958k) | max_line_length (int64, 1-1.03M) | alphanum_fraction (float64, 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a227ce94b0a45e7cdd4e6e6aacf20f14da02b40 | 4,465 | py | Python | aerosandbox/library/aerodynamics/inviscid.py | scivm/AeroSandbox | 616c579e49bc13c3023364773705eaac7df10da7 | ["MIT"] | 1 | 2021-04-07T08:59:31.000Z | 2021-04-07T08:59:31.000Z | aerosandbox/library/aerodynamics/inviscid.py | scivm/AeroSandbox | 616c579e49bc13c3023364773705eaac7df10da7 | ["MIT"] | null | null | null | aerosandbox/library/aerodynamics/inviscid.py | scivm/AeroSandbox | 616c579e49bc13c3023364773705eaac7df10da7 | ["MIT"] | null | null | null |
import aerosandbox.numpy as np
def induced_drag(
lift,
span,
dynamic_pressure,
oswalds_efficiency=1,
):
"""
Computes the induced drag associated with a lifting planar wing.
Args:
lift: Lift force [Newtons]
span: Wing span [meters]
dynamic_pressure: Dynamic pressure [Pascals]
oswalds_efficiency: Oswald's efficiency factor [-]
Returns: Induced drag force [Newtons]
"""
return lift ** 2 / (
dynamic_pressure * np.pi * span ** 2 * oswalds_efficiency
)
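# --- Editor's illustrative usage sketch; not part of the original AeroSandbox source. ---
# Shows one call to induced_drag(); the numeric inputs are assumed example values.
def _example_induced_drag():
    # 5000 N of lift on a 20 m span at q = 500 Pa with e = 0.85 -> roughly 46.8 N
    return induced_drag(lift=5000., span=20., dynamic_pressure=500., oswalds_efficiency=0.85)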
def oswalds_efficiency(
taper_ratio: float,
aspect_ratio: float,
sweep: float = 0.,
fuselage_diameter_to_span_ratio: float = 0.,
) -> float:
"""
Computes the Oswald's efficiency factor for a planar, tapered, swept wing.
Based on "Estimating the Oswald Factor from Basic Aircraft Geometrical Parameters"
by M. Nita, D. Scholz; Hamburg Univ. of Applied Sciences, 2012.
Implementation of Section 5 from the above paper.
Only valid for backwards-swept wings; i.e. 0 <= sweep < 90.
Args:
taper_ratio: Taper ratio of the wing (tip_chord / root_chord) [-]
aspect_ratio: Aspect ratio of the wing (b^2 / S) [-]
        sweep: Wing quarter-chord sweep angle [deg]
        fuselage_diameter_to_span_ratio: Ratio of fuselage diameter to wing span [-]
    Returns: Oswald's efficiency factor [-]
"""
def f(l): # f(lambda), given as Eq. 36 in the Nita and Scholz paper (see parent docstring).
return (
0.0524 * l ** 4
- 0.15 * l ** 3
+ 0.1659 * l ** 2
- 0.0706 * l
+ 0.0119
)
delta_lambda = -0.357 + 0.45 * np.exp(-0.0375 * sweep)
# Eq. 37 in Nita & Scholz.
# Note: there is a typo in the cited paper; the negative in the exponent was omitted.
# A bit of thinking about this reveals that this omission must be erroneous.
e_theo = 1 / (
1 + f(taper_ratio - delta_lambda) * aspect_ratio
)
fuselage_wake_contraction_correction_factor = 1 - 2 * (fuselage_diameter_to_span_ratio) ** 2
e = e_theo * fuselage_wake_contraction_correction_factor
return e
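# --- Editor's illustrative usage sketch; not part of the original AeroSandbox source. ---
# Assumed example inputs: an unswept wing with taper 0.5, aspect ratio 8, no fuselage.
def _example_oswalds_efficiency():
    # Evaluates to roughly 0.98 for these assumed inputs
    return oswalds_efficiency(taper_ratio=0.5, aspect_ratio=8, sweep=0., fuselage_diameter_to_span_ratio=0.)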
def optimal_taper_ratio(
sweep=0.,
) -> float:
"""
Computes the optimal (minimum-induced-drag) taper ratio for a given quarter-chord sweep angle.
Based on "Estimating the Oswald Factor from Basic Aircraft Geometrical Parameters"
by M. Nita, D. Scholz; Hamburg Univ. of Applied Sciences, 2012.
Only valid for backwards-swept wings; i.e. 0 <= sweep < 90.
Args:
sweep: Wing quarter-chord sweep angle [deg]
Returns: Optimal taper ratio
"""
return 0.45 * np.exp(-0.0375 * sweep)
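# --- Editor's illustrative usage sketch; not part of the original AeroSandbox source. ---
# Assumed example input: a 30-degree quarter-chord sweep gives an optimal taper ratio
# of about 0.45 * exp(-0.0375 * 30) ~= 0.15.
def _example_optimal_taper_ratio():
    return optimal_taper_ratio(sweep=30.)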
def CL_over_Cl(
aspect_ratio: float,
mach: float = 0.,
sweep: float = 0.
) -> float:
"""
Returns the ratio of 3D lift coefficient (with compressibility) to 2D lift coefficient (incompressible).
:param aspect_ratio: Aspect ratio
:param mach: Mach number
:param sweep: Sweep angle [deg]
:return:
"""
beta = np.where(
1 - mach ** 2 >= 0,
np.fmax(1 - mach ** 2, 0) ** 0.5,
0
)
# return aspect_ratio / (aspect_ratio + 2) # Equivalent to equation in Drela's FVA in incompressible, 2*pi*alpha limit.
# return aspect_ratio / (2 + cas.sqrt(4 + aspect_ratio ** 2)) # more theoretically sound at low aspect_ratio
eta = 0.95
return aspect_ratio / (
2 + np.sqrt(
4 + (aspect_ratio * beta / eta) ** 2 * (1 + (np.tand(sweep) / beta) ** 2)
)
) # From Raymer, Sect. 12.4.1; citing DATCOM
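# --- Editor's illustrative usage sketch; not part of the original AeroSandbox source. ---
# Assumed example inputs: aspect ratio 8, Mach 0.3, no sweep; returns the ratio of the
# 3D lift-curve slope to the 2D (incompressible) one.
def _example_CL_over_Cl():
    return CL_over_Cl(aspect_ratio=8, mach=0.3, sweep=0.)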
def induced_drag_ratio_from_ground_effect(
h_over_b # type: float
):
"""
Gives the ratio of actual induced drag to free-flight induced drag experienced by a wing in ground effect.
Artificially smoothed below around h/b == 0.05 to retain differentiability and practicality.
Source: W. F. Phillips, D. F. Hunsaker, "Lifting-Line Predictions for Induced Drag and Lift in Ground Effect".
Using Equation 5 from the paper, which is modified from a model from Torenbeek:
Torenbeek, E. "Ground Effects", 1982.
:param h_over_b: (Height above ground) divided by (wingspan).
:return: Ratio of induced drag in ground effect to induced drag out of ground effect [unitless]
"""
h_over_b = np.softmax(
h_over_b,
0,
hardness=1 / 0.03
)
return 1 - np.exp(
-4.01 * (2 * h_over_b) ** 0.717
)
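# --- Editor's illustrative usage sketch; not part of the original AeroSandbox source. ---
# Assumed example input: flying at a height of 10% of the wingspan above the ground.
def _example_ground_effect_ratio():
    return induced_drag_ratio_from_ground_effect(h_over_b=0.1)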
| 31.006944 | 123 | 0.620605 |
4a227dc8df23d5a3b73d7ccb32997dba628f0126 | 23,593 | py | Python | src/server/example.py | programmfabrik/easydb-example-plugin | 9ef96f578ccc5b6d7d37eb7cf927a6b908e184f1 | ["MIT"] | null | null | null | src/server/example.py | programmfabrik/easydb-example-plugin | 9ef96f578ccc5b6d7d37eb7cf927a6b908e184f1 | ["MIT"] | 3 | 2021-12-09T14:13:23.000Z | 2021-12-09T14:13:29.000Z | src/server/example.py | programmfabrik/easydb-example-plugin | 9ef96f578ccc5b6d7d37eb7cf927a6b908e184f1 | ["MIT"] | 2 | 2018-03-15T11:24:43.000Z | 2021-04-06T08:41:01.000Z |
# coding=utf8
import os
import json
import yaml
from datetime import datetime, date
import calendar
import locale
from dateutil.relativedelta import relativedelta
from time import sleep
from threading import Thread
from context import EasydbException
from context import InvalidValueError
from context import get_json_value
# called from easydb
def easydb_server_start(easydb_context):
# called when server starts (just once)
logger = easydb_context.get_logger('pf.plugin.base.example_plugin')
logger.debug('server_start')
logger.debug('instance information: {0}'.format(json.dumps(easydb_context.get_instance(), indent=4)))
# api callbacks that extend the api
# the api url is <server>/api/plugin/base/<plugin name>/<callback name>
easydb_context.register_callback('api', {
'name': 'echo',
'callback': 'echo'
})
easydb_context.register_callback('api', {
'name': 'config',
'callback': 'config'
})
easydb_context.register_callback('api', {
'name': 'session',
'callback': 'session'
})
easydb_context.register_callback('api', {
'name': 'tmp',
'callback': 'tmp'
})
easydb_context.register_callback('api', {
'name': 'instance',
'callback': 'instance'
})
# register a callback that is called before an object would be saved in the database
# callback registered in the server as 'db_pre_update'
# method name that is called: 'pre_update'
easydb_context.register_callback('db_pre_update', {
'callback': 'pre_update'
})
# register a callback that is called after data was exported
# export objects as YML
easydb_context.register_callback('export_produce', {
'callback': 'export_as_yml'
})
# register a process callback
# check objects for expiration date and send mails
easydb_context.register_callback('process', {
'name': 'check_expiration_date'
})
# register a transition callback
easydb_context.register_callback('transition_action', {
'action_type': 'example_transition_action',
'callback': 'example_transition_action'
})
# helper method to generate a unique id for an object
def generate_unique_id(easydb_context):
# get a unused id from the database and add a (optional) prefix
return easydb_context.next_unique_id_prefixed('medium_nummer', 16, 'medium_')
# helper method to perform a search using an EasydbContext
def perform_search_easydb(easydb_context, query, logger=None):
# get the user id from the current session
user_id = None
try:
session = easydb_context.get_session()
user_id = get_json_value(session, 'user.user._id')
if not isinstance(user_id, int):
logger.error('Could not get user id from session')
return None
except Exception as e:
logger.error('Could not get user id from session: %s' % e)
return None
search_result = easydb_context.search('user', user_id, query)
if logger is not None:
logger.debug('Search Result: %s' % json.dumps(search_result, indent=4))
return search_result
# helper method to perform a search using an EasydbProcessContext
def perform_search_process(easydb_context, connection, session_identifier, query, logger=None):
search_result = easydb_context.search(connection, 'user', session_identifier, query)
if logger is not None:
logger.debug('Search Result: %s' % json.dumps(search_result, indent=4))
return search_result
# helper method that creates a elasticsearch request for the name of a medienart object
def search_for_medienart(easydb_context, name):
logger = easydb_context.get_logger('pf.plugin.base.example_plugin.search_for_medienart')
logger.info('Search for medienart \'%s\'' % name)
# define the search query
search_query = {
'type': 'object',
'generate_rights': False,
'include_fields': ['medienart._id'], # the field that we want to get as a search result
'search': [{
'type': 'in', # search on of the values in 'in'
'bool': 'should',
'fields': ['medienart.name'], # the name of the field that we search for
'in': [name] # list of values that the field should have (only on in this case)
}]
}
logger.debug('Search Request: %s' % json.dumps(search_query, indent=4))
# perform the search and return the result
search_result = perform_search_easydb(easydb_context, search_query, logger)
return search_result
# helper method to create a linked medienart object from the search result
def link_medienart(easydb_context, logger, data, search_result, name):
# get the medienart id from the search result and set it in the object
result_objects = get_json_value(search_result, 'objects')
if isinstance(result_objects, list) and len(result_objects) > 0:
        # there should only be one hit, but to be sure iterate through the list of result objects and find the one with the correct name
for k in range(len(result_objects)):
# check if the name is correct and there is a valid id
medienart_name = get_json_value(result_objects[k], 'medienart.name')
if isinstance(medienart_name, unicode) and medienart_name == unicode(name):
medienart_id = get_json_value(result_objects[k], 'medienart._id')
if isinstance(medienart_id, int):
# the medienart id is valid, add a linked object to the data
data['medium']['medienart'] = {
'medienart': {
'_id': medienart_id
},
'_objecttype': 'medienart',
'_mask': '_all_fields'
}
logger.debug('link object %d with medienart id %d' % (k, medienart_id))
return data
return data
# method for the 'db_pre_update' callback
# this method should be used to check the validity of the object data before saving
def pre_update(easydb_context, easydb_info):
# get a logger
logger = easydb_context.get_logger('pf.plugin.base.example_plugin.pre_update')
logger.info('pre_update was called')
# get the object data
data = get_json_value(easydb_info, 'data')
logger.debug('%d Objects' % len(data))
# check the data, and if there is invalid data, throw an InvalidValueError
for i in range(len(data)):
# check if the objecttype is set
if '_objecttype' not in data[i]:
continue
# check if the objecttype is correct
if data[i]['_objecttype'] != 'medium':
logger.debug('Ignoring object type %s' % data[i]['_objecttype'])
continue
# depending on the mask, check if mandatory fields are set and set the linked object medienart
if data[i]['_mask'] == 'medium_cd':
logger.debug('Checking mandatory fields for \'CD\'')
spieldauer_min = get_json_value(data[i], 'medium.spieldauer_min')
# check if the fields are valid
if spieldauer_min is None or not isinstance(spieldauer_min, int) or spieldauer_min <= 0:
raise InvalidValueError('spieldauer_min', str(spieldauer_min), 'integer > 0')
# format the time to hh:mm:ss. the decimal number is defined as an integer, so divide the value by 100 to get seconds
hours, remainder = divmod(int(float(spieldauer_min) / 100.0), 3600)
minutes, seconds = divmod(remainder, 60)
data[i]['medium']['spieldauer'] = '%02d:%02d:%02d' % (hours, minutes, seconds)
# set the linked object medienart with the value 'CD'
# perform an elasticsearch request to get the id of the medienart object
search_result = search_for_medienart(easydb_context, 'CD')
data[i] = link_medienart(easydb_context, logger, data[i], search_result, 'CD')
elif data[i]['_mask'] == 'medium_buch':
logger.debug('Checking mandatory fields for \'Buch\'')
seitenzahl = get_json_value(data[i], 'medium.seitenzahl')
# check if the fields are valid
if seitenzahl is None or not isinstance(seitenzahl, int) or seitenzahl <= 0:
raise InvalidValueError('seitenzahl', str(seitenzahl), 'integer > 0')
# set the linked object medienart with the value 'Buch'
# perform an elasticsearch request to get the id of the medienart object
search_result = search_for_medienart(easydb_context, 'Buch')
data[i] = link_medienart(easydb_context, logger, data[i], search_result, 'Buch')
# to avoid confusion with masks and read/write settings in masks, always use the _all_fields mask
data[i]['_mask'] = '_all_fields'
# generate a unique id for this object, if there is none (when the object was just created)
if get_json_value(data[i], 'medium.identifier') is None:
new_id = str(generate_unique_id(easydb_context))
logger.debug('Generating new ID for Object %d: %s' % (i, new_id))
data[i]['medium']['identifier'] = new_id
# always return if no exception was thrown, so the server and frontend are not blocked
logger.debug('pre_update function returns following data to the database: %s'
% json.dumps(data, indent=4))
return data
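# --- Editor's illustrative sketch; not part of the original plugin. ---
# A minimal, assumed example of one object as pre_update() expects it; only the keys
# follow the checks above, the values are made up.
def _example_pre_update_object():
    return {
        '_objecttype': 'medium',
        '_mask': 'medium_cd',
        'medium': {
            'spieldauer_min': 4230,  # decimal stored as an integer; pre_update() divides by 100
            'identifier': None       # left unset so pre_update() generates a unique id
        }
    }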
# called after data was exported
# load the exported json files and save the content as YML
def export_as_yml(easydb_context, parameters):
logger = easydb_context.get_logger('pf.plugin.base.example_plugin.export_as_yml')
# get the exporter definition
exporter = easydb_context.get_exporter()
# check if the export definition fits to this plugin
produce_options = get_json_value(exporter.getExport(), 'export.produce_options', False)
if str(get_json_value(produce_options, 'plugin', False)) != 'example_export':
return
# check if the produce option for exporting the YML with or without tags is a boolean, else set it to false
with_tags = get_json_value(produce_options, 'with_tags', False)
if not isinstance(with_tags, bool):
with_tags = False
# load exported files (need to be exported as JSON files)
export_dir = exporter.getFilesPath()
logger.debug('Export Dir: %s' % export_dir)
files = exporter.getFiles()
if not isinstance(files, list) or len(files) < 1:
logger.warn('No valid file list!')
return
# iterate over the definitions of the exported files and parse the json content
for f in files:
file_path = str(get_json_value(f, 'path', False))
if file_path.lower().endswith('.json'):
# absolute path to the original file
file_path = os.path.abspath(export_dir + '/' + file_path)
# path of the new file
file_name = str(f['path'].split('.')[0] + '.yml')
logger.debug('Converting JSON file %s to YML' % file_path)
try:
# load and parse the json file
file = open(file_path, 'r')
content = json.loads(file.read().decode('utf-8'))
file.close()
# convert the objects that are defined in a json array to YML and save it in a file next to the original file
objects = get_json_value(content, 'objects', False)
if isinstance(objects, list) and len(objects) > 0:
# save the file in the temporary folder and add it later to the exported files
tmp_filename = os.path.abspath('%s/../tmp/objects.yml' % export_dir)
with open(tmp_filename, 'w') as yml_file:
# define the final dict that will be converted to YML
object_output = {
'objects': objects
}
# depending on the produce options, export the YML with or without tags
if with_tags:
yaml.dump(object_output, yml_file, default_flow_style=False)
else:
yaml.safe_dump(object_output, yml_file, default_flow_style=False)
yml_file.close()
logger.debug('Saved objects as %s' % tmp_filename)
# add the new YML file to the export so it can be opened or downloaded from the frontend
exporter.addFile(tmp_filename, file_name)
# remove the old JSON file
exporter.removeFile(f['path'])
else:
logger.debug('Found no \'objects\' array')
except Exception as e:
logger.warn('Could not convert JSON to YML: %s' % str(e))
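# --- Editor's illustrative sketch; not part of the original plugin. ---
# Rough shape of the file written above by yaml.safe_dump({'objects': objects}, ...);
# only the top-level 'objects' key comes from the code, the object fields are assumed:
#
#   objects:
#   - medium:
#       identifier: medium_...
#       titel: ...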
# run method to start threads for process plugins
# method is called at server start
def run(easydb_context):
logger = easydb_context.get_logger('pf.plugin.base.example_plugin.process')
logger.info('run')
    # set up a thread that periodically checks whether any objects' expiration dates are approaching
t = Thread(target=check_expiration_date, args=(easydb_context,))
t.start()
t.join()
# repeatedly check the expiration date of objects
# easydb_context: EasydbProcessContext
def check_expiration_date(easydb_context):
logger = easydb_context.get_logger('pf.plugin.base.example_plugin.check_expiration_date')
# connect to the database
connection = easydb_context.db_connect('check_expiration_date')
# perform a check if the objecttype 'medium' exists
sql = """
SELECT EXISTS(
SELECT * FROM information_schema.tables
WHERE table_schema = 'public' AND table_name = 'medium'
) AS medium_exists;
"""
# perform the request and save the result
connection.cursor().execute(sql)
result = connection.cursor().fetchone()
if result['medium_exists'] != u't':
logger.debug('objecttype \'medium\' does not exist, skip')
return
# load the configuration
config = easydb_context.get_config(connection)
    # search all objects of the type 'medium', using a SQL query, where the expiration date is in less than a week
while True:
# create and format a date that is 7 days in the future
days_in_future = 7
date = datetime.now() + relativedelta(days=days_in_future)
date_str = date.strftime('%Y-%m-%d')
# build the Postgres statement
sql = """
SELECT m."id:pkey", m.titel, m.identifier, m.ablaufdatum, m.":owner:ez_user:id",
u.login, u.name, u.firstname, u.displayname, u.frontend_language,
e.address
FROM medium m JOIN ez_user u ON m.":owner:ez_user:id" = u."ez_user:id"
AND ablaufdatum <= '%s'
JOIN "ez_user:email" e ON e."ez_user:id" = u."ez_user:id"
AND e.is_primary AND e.send_email AND address IS NOT NULL;
"""
# perform the request and save the result
connection.cursor().execute(sql % date_str)
result = connection.cursor().fetchall()
mails_to_send = {}
logger.debug('%s results found' % len(result))
for row in result:
try:
# information about the object
identifier = row['identifier']
titel = row['titel']
ablaufdatum = datetime.strptime(row['ablaufdatum'], '%Y-%m-%d')
# mail address
address = row['address'] if len(row['address']) else None
if address is None:
continue
# user information
user_displayname = row['displayname']
if user_displayname is None or len(user_displayname) < 1:
user_displayname = ''
user_displayname += row['firstname'] if row['firstname'] is not None else ''
user_displayname += ' ' if len(user_displayname) > 0 else ''
user_displayname += row['name'] if row['name'] is not None else ''
if len(user_displayname) < 1:
user_displayname = row['login'] if row['login'] is not None else ''
if len(user_displayname) < 1:
user_displayname = address
# set the locale according to the user language
user_lang = row['frontend_language']
# write the text for the mail in german or english
# TODO get the l10n translations from the server
mail_text = None
if user_lang == 'de-DE':
locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
mail_text = 'Medium %s (%s) laeuft am %s ab' % (
identifier,
titel,
'%s, den %s' % (
calendar.day_name[ablaufdatum.weekday()],
ablaufdatum.strftime('%d.%m.%Y')
)
)
else:
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
mail_text = 'Medium %s (%s) expires %s' % (
identifier,
titel,
'%s, %s' % (
calendar.day_name[ablaufdatum.weekday()],
ablaufdatum.strftime('%Y-%m-%d')
)
)
logger.info(mail_text)
if not address in mails_to_send:
mails_to_send[address] = {
'language': user_lang,
'name': user_displayname,
'mail_text': []
}
mails_to_send[address]['mail_text'].append(mail_text)
except Exception as e:
raise e
for adr in mails_to_send:
mail = None
if mails_to_send[adr]['language'] == 'de-DE':
mail = 'Hallo %s,\n\ndie folgenden Objekte laufen innerhalb der naechsten %d Tage ab:\n\n%s\n\nMit freundlichen Gruessen'
else:
mail = 'Hello %s,\n\nthe following objects expire during the next %d days:\n\n%s\n\nRegards'
logger.debug('Mail to %s:\n%s' % (
adr, mail % (
mails_to_send[adr]['name'],
days_in_future,
'\n - '.join(mails_to_send[adr]['mail_text'])
)
))
# TODO send the mail instead of logging the mail text
# log a custom event to add the sending of the mail to the event log of the server
db_name = get_json_value(easydb_context.get_instance(), 'db-name')
db_conn = easydb_context.db_connect(db_name)
easydb_context.log_event(
db_conn,
'EASYDB_EXAMPLE_PLUGIN_EVENT',
{
'medium_id': identifier,
'date': ablaufdatum.strftime('%Y-%m-%d'),
'title': titel,
'owner_name': mails_to_send[adr]['name']
}
)
db_conn.commit()
# sleep for one hour
sleep(60 * 60)
    # TODO don't send more than one mail for each object
# example transition action
# write the current timestamp into a text field
# assume that the objects are of objecttype 'obj' and have a text field 'timestamp'
# this function is only called if there are transitions (for INSERT/UPDATE) on this objecttype with this plugin action set
def example_transition_action(easydb_context, data):
logger = easydb_context.get_logger('pf.plugin.base.example_plugin.example_transition_action')
if not 'data' in data:
return []
objects = data['data']
for i in range(len(objects)):
# check for the correct objecttype
if not 'obj' in objects[i]:
continue
# set the value in the object
objects[i]['obj']['timestamp'] = str(datetime.now())
logger.debug('set timestamp for object %d: %s' % (objects[i]['obj']['_id'],
objects[i]['obj']['timestamp']))
return objects
# method to cleanup process plugin resources before the server stops
def stop(easydb_context):
logger = easydb_context.get_logger('pf.plugin.base.example_plugin.process')
logger.info('stop')
# method that is called when API Endpoint <server>/api/plugin/base/example-plugin/config is called
def config(easydb_context, parameters):
return json_response(easydb_context.get_config())
# method that is called when API Endpoint <server>/api/plugin/base/example-plugin/session is called
def session(easydb_context, parameters):
return json_response(easydb_context.get_session())
# method that is called when API Endpoint <server>/api/plugin/base/example-plugin/tmp is called
def tmp(easydb_context, parameters):
tmp_dir = easydb_context.get_temp_dir()
return text_response('temp dir: {0}'.format(tmp_dir))
# method that is called when API Endpoint <server>/api/plugin/base/example-plugin/instance is called
def instance(easydb_context, parameters):
instance = easydb_context.get_instance()
return json_response(easydb_context.get_instance())
# method that is called when API Endpoint <server>/api/plugin/base/example-plugin/echo is called
def echo(easydb_context, parameters):
status_code = 200
content_type = '<undefined>'
lines = []
lines.append('*** Request Information ***')
lines.append('')
lines.append(u'{0} {1}'.format(parameters['method'], parameters['path']))
query_string = parameters['query_string']
if len(query_string) > 0:
lines.append('Query String Parameters:')
for part in query_string.split('&'):
part_parts = part.split('=')
key = part_parts[0]
if len(part_parts) > 1:
value = part_parts[1]
else:
value = '<undefined>'
lines.append(u'* {0} = {1}'.format(key, value))
if key == 'status_code':
status_code = int(value)
else:
lines.append('Query String: <empty>')
lines.append('')
lines.append('Headers:')
for key, value in parameters['headers'].items():
lines.append('* {0}: {1}'.format(key, value))
if key.lower() == 'content-type':
content_type = value
lines.append('')
body = parameters['body']
if len(body) > 0:
if 'text' in content_type or 'json' in content_type:
lines.append('Body:')
lines.append('')
lines.append(body)
else:
lines.append('Body: {0} bytes'.format(len(body)))
else:
lines.append('Body: none')
lines.append('')
return text_response('\n'.join(lines), status_code=status_code)
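# --- Editor's illustrative sketch; not part of the original plugin. ---
# Assumed example call against the 'echo' endpoint registered above; the host name is
# made up. echo() replies with the given status code (parsed from the query string) and
# a plain-text dump of the request's method, path, query parameters, headers and body.
#
#   curl "http://easydb-server/api/plugin/base/example-plugin/echo?status_code=418&foo=bar"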
def json_response(js):
return {
'status_code': 200,
'body': json.dumps(js, indent=4),
'headers': {
'Content-Type': 'application/json; charset=utf-8'
}
}
def text_response(text, status_code=200):
return {
'status_code': status_code,
'body': text,
'headers': {
'Content-Type': 'text/plain; charset=utf-8'
}
}
| 39.126036 | 137 | 0.606917 |
4a227e14ac7f5516c2e8700657eb9fc42deabee0 | 17,827 | py | Python | examples/dbm_cifar_naive.py | praisethemoon/boltzmann-machines | bc49ba2c8c6c894af55b272e1b92f9cea3576136 | ["MIT"] | 196 | 2019-03-16T14:50:49.000Z | 2022-03-31T03:24:00.000Z | examples/dbm_cifar_naive.py | praisethemoon/boltzmann-machines | bc49ba2c8c6c894af55b272e1b92f9cea3576136 | ["MIT"] | 6 | 2019-04-09T07:33:01.000Z | 2019-11-27T21:37:37.000Z | examples/dbm_cifar_naive.py | praisethemoon/boltzmann-machines | bc49ba2c8c6c894af55b272e1b92f9cea3576136 | ["MIT"] | 49 | 2019-03-16T14:51:04.000Z | 2022-03-10T13:47:40.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Train 3072-5000-1000 Gaussian-Bernoulli-Multinomial
DBM with pre-training on "smoothed" CIFAR-10 (with 1000 least
significant singular values removed), as suggested in [1].
Per sample validation mean reconstruction error for DBM monotonically
decreases during training from ~0.99 to (only) ~0.5 after 1500 epochs.
The training took approx. 47m + 119m + 22h 40m ~ 1d 1h 30m on GTX 1060.
Note that DBM is trained without centering.
After models are trained, Gaussian RBM is discriminatively fine-tuned.
It achieves 59.78% accuracy on a test set.
References
----------
[1] A. Krizhevsky and G. Hinton. Learning multiple layers of features
    from tiny images. 2009.
"""
print __doc__
import os
import argparse
import numpy as np
from scipy.linalg import svd
from keras import regularizers
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.initializers import glorot_uniform
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, BatchNormalization as BN
from sklearn.metrics import accuracy_score
import env
from boltzmann_machines import DBM
from boltzmann_machines.rbm import GaussianRBM, MultinomialRBM
from boltzmann_machines.utils import (RNG, Stopwatch,
one_hot, one_hot_decision_function, unhot)
from boltzmann_machines.utils.dataset import load_cifar10
from boltzmann_machines.utils.optimizers import MultiAdam
def make_smoothing(X_train, n_train, args):
X_s = None
X_s_path = os.path.join(args.data_path, 'X_s.npy')
do_smoothing = True
if os.path.isfile(X_s_path):
print "\nLoading smoothed data ..."
X_s = np.load(X_s_path)
print "Checking augmented data ..."
if len(X_s) == n_train:
do_smoothing = False
if do_smoothing:
print "\nSmoothing data ..."
X_m = X_train.mean(axis=0)
X_train -= X_m
with Stopwatch(verbose=True) as s:
[U, s, Vh] = svd(X_train,
full_matrices=False,
compute_uv=True,
overwrite_a=True,
check_finite=False)
s[-1000:] = 0.
X_s = U.dot(np.diag(s).dot(Vh))
X_s += X_m
# save to disk
np.save(X_s_path, X_s)
print "\n"
return X_s
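# --- Editor's minimal sketch of the smoothing step above; not part of the original file. ---
# Standalone version of the SVD truncation done in make_smoothing(), on an assumed small
# array: zero the `n_drop` smallest singular values and reconstruct the matrix.
def _example_svd_smoothing(X, n_drop=2):
    import numpy as _np
    X_mean = X.mean(axis=0)
    U, s, Vh = _np.linalg.svd(X - X_mean, full_matrices=False)
    s[-n_drop:] = 0.
    return U.dot(_np.diag(s).dot(Vh)) + X_mean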
def make_grbm((X_train, X_val), args):
if os.path.isdir(args.grbm_dirpath):
print "\nLoading G-RBM ...\n\n"
grbm = GaussianRBM.load_model(args.grbm_dirpath)
else:
print "\nTraining G-RBM ...\n\n"
grbm = GaussianRBM(n_visible=32 * 32 * 3,
n_hidden=5000,
sigma=1.,
W_init=0.0008,
vb_init=0.,
hb_init=0.,
n_gibbs_steps=args.n_gibbs_steps[0],
learning_rate=args.lr[0],
momentum=np.geomspace(0.5, 0.9, 8),
max_epoch=args.epochs[0],
batch_size=args.batch_size[0],
l2=args.l2[0],
sample_v_states=True,
sample_h_states=True,
sparsity_cost=0.,
dbm_first=True, # !!!
metrics_config=dict(
msre=True,
feg=True,
train_metrics_every_iter=1000,
val_metrics_every_epoch=2,
feg_every_epoch=2,
n_batches_for_feg=50,
),
verbose=True,
display_filters=12,
display_hidden_activations=24,
v_shape=(32, 32, 3),
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.grbm_dirpath)
grbm.fit(X_train, X_val)
return grbm
def make_mrbm((Q_train, Q_val), args):
if os.path.isdir(args.mrbm_dirpath):
print "\nLoading M-RBM ...\n\n"
mrbm = MultinomialRBM.load_model(args.mrbm_dirpath)
else:
print "\nTraining M-RBM ...\n\n"
mrbm = MultinomialRBM(n_visible=5000,
n_hidden=1000,
n_samples=1000,
W_init=0.01,
hb_init=0.,
vb_init=0.,
n_gibbs_steps=args.n_gibbs_steps[1],
learning_rate=args.lr[1],
momentum=np.geomspace(0.5, 0.9, 8),
max_epoch=args.epochs[1],
batch_size=args.batch_size[1],
l2=args.l2[1],
sample_h_states=True,
sample_v_states=False,
sparsity_cost=0.,
dbm_last=True, # !!!
metrics_config=dict(
msre=True,
pll=True,
feg=True,
train_metrics_every_iter=400,
val_metrics_every_epoch=2,
feg_every_epoch=2,
n_batches_for_feg=50,
),
verbose=True,
display_filters=0,
display_hidden_activations=100,
random_seed=1337,
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.mrbm_dirpath)
mrbm.fit(Q_train, Q_val)
return mrbm
def make_rbm_transform(rbm, X, path, np_dtype=None):
H = None
transform = True
if os.path.isfile(path):
H = np.load(path)
if len(X) == len(H):
transform = False
if transform:
H = rbm.transform(X, np_dtype=np_dtype)
np.save(path, H)
return H
def make_dbm((X_train, X_val), rbms, (Q, G), args):
if os.path.isdir(args.dbm_dirpath):
print "\nLoading DBM ...\n\n"
dbm = DBM.load_model(args.dbm_dirpath)
dbm.load_rbms(rbms) # !!!
else:
print "\nTraining DBM ...\n\n"
dbm = DBM(rbms=rbms,
n_particles=args.n_particles,
v_particle_init=X_train[:args.n_particles].copy(),
h_particles_init=(Q[:args.n_particles].copy(),
G[:args.n_particles].copy()),
n_gibbs_steps=args.n_gibbs_steps[2],
max_mf_updates=args.max_mf_updates,
mf_tol=args.mf_tol,
learning_rate=np.geomspace(args.lr[2], 1e-5, args.epochs[2]),
momentum=np.geomspace(0.5, 0.9, 10),
max_epoch=args.epochs[2],
batch_size=args.batch_size[2],
l2=args.l2[2],
max_norm=args.max_norm,
sample_v_states=True,
sample_h_states=(True, True),
sparsity_cost=0.,
train_metrics_every_iter=1000,
val_metrics_every_epoch=2,
random_seed=args.random_seed[2],
verbose=True,
save_after_each_epoch=True,
display_filters=12,
display_particles=36,
v_shape=(32, 32, 3),
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.dbm_dirpath)
dbm.fit(X_train, X_val)
return dbm
def make_mlp((X_train, y_train), (X_val, y_val), (X_test, y_test),
(W, hb), args):
dense_params = {}
if W is not None and hb is not None:
dense_params['weights'] = (W, hb)
# define and initialize MLP model
mlp = Sequential([
Dense(5000, input_shape=(3 * 32 * 32,),
kernel_regularizer=regularizers.l2(args.mlp_l2),
kernel_initializer=glorot_uniform(seed=3333),
**dense_params),
BN(),
Activation('relu'),
Dropout(args.mlp_dropout, seed=4444),
Dense(10, kernel_initializer=glorot_uniform(seed=5555)),
Activation('softmax'),
])
mlp.compile(optimizer=MultiAdam(lr=0.001,
lr_multipliers={'dense_1': args.mlp_lrm[0],
'dense_2': args.mlp_lrm[1]}),
loss='categorical_crossentropy',
metrics=['accuracy'])
# train and evaluate classifier
with Stopwatch(verbose=True) as s:
early_stopping = EarlyStopping(monitor=args.mlp_val_metric, patience=12, verbose=2)
reduce_lr = ReduceLROnPlateau(monitor=args.mlp_val_metric, factor=0.2, verbose=2,
patience=6, min_lr=1e-5)
callbacks = [early_stopping, reduce_lr]
try:
mlp.fit(X_train, one_hot(y_train, n_classes=10),
epochs=args.mlp_epochs,
batch_size=args.mlp_batch_size,
shuffle=False,
validation_data=(X_val, one_hot(y_val, n_classes=10)),
callbacks=callbacks)
except KeyboardInterrupt:
pass
y_pred = mlp.predict(X_test)
y_pred = unhot(one_hot_decision_function(y_pred), n_classes=10)
print "Test accuracy: {:.4f}".format(accuracy_score(y_test, y_pred))
# save predictions, targets, and fine-tuned weights
np.save(args.mlp_save_prefix + 'y_pred.npy', y_pred)
np.save(args.mlp_save_prefix + 'y_test.npy', y_test)
W_finetuned, _ = mlp.layers[0].get_weights()
np.save(args.mlp_save_prefix + 'W_finetuned.npy', W_finetuned)
def main():
# training settings
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# general
parser.add_argument('--gpu', type=str, default='0', metavar='ID',
help="ID of the GPU to train on (or '' to train on CPU)")
# data
parser.add_argument('--n-train', type=int, default=49000, metavar='N',
help='number of training examples')
parser.add_argument('--n-val', type=int, default=1000, metavar='N',
help='number of validation examples')
parser.add_argument('--data-path', type=str, default='../data/', metavar='PATH',
help='directory for storing augmented data etc.')
# common for RBMs and DBM
parser.add_argument('--n-gibbs-steps', type=int, default=(1, 1, 1), metavar='N', nargs='+',
help='(initial) number of Gibbs steps for CD/PCD')
parser.add_argument('--lr', type=float, default=(5e-4, 1e-4, 8e-5), metavar='LR', nargs='+',
help='(initial) learning rates')
parser.add_argument('--epochs', type=int, default=(120, 180, 1500), metavar='N', nargs='+',
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=(100, 100, 100), metavar='B', nargs='+',
help='input batch size for training, `--n-train` and `--n-val`' + \
'must be divisible by this number (for DBM)')
parser.add_argument('--l2', type=float, default=(0.01, 0.05, 1e-8), metavar='L2', nargs='+',
help='L2 weight decay coefficients')
parser.add_argument('--random-seed', type=int, default=(1337, 1111, 2222), metavar='N', nargs='+',
help='random seeds for models training')
# save dirpaths
parser.add_argument('--grbm-dirpath', type=str, default='../models/grbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save Gaussian RBM')
parser.add_argument('--mrbm-dirpath', type=str, default='../models/mrbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save Multinomial RBM')
parser.add_argument('--dbm-dirpath', type=str, default='../models/dbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save DBM')
# DBM related
parser.add_argument('--n-particles', type=int, default=100, metavar='M',
help='number of persistent Markov chains')
parser.add_argument('--max-mf-updates', type=int, default=50, metavar='N',
help='maximum number of mean-field updates per weight update')
parser.add_argument('--mf-tol', type=float, default=1e-11, metavar='TOL',
help='mean-field tolerance')
parser.add_argument('--max-norm', type=float, default=4., metavar='C',
help='maximum norm constraint')
# MLP related
parser.add_argument('--mlp-no-init', action='store_true',
help='if enabled, use random initialization')
parser.add_argument('--mlp-l2', type=float, default=1e-4, metavar='L2',
help='L2 weight decay coefficient')
parser.add_argument('--mlp-lrm', type=float, default=(0.1, 1.), metavar='LRM', nargs='+',
help='learning rate multipliers of 1e-3')
parser.add_argument('--mlp-epochs', type=int, default=100, metavar='N',
help='number of epochs to train')
parser.add_argument('--mlp-val-metric', type=str, default='val_acc', metavar='S',
help="metric on validation set to perform early stopping, {'val_acc', 'val_loss'}")
parser.add_argument('--mlp-batch-size', type=int, default=128, metavar='N',
help='input batch size for training')
parser.add_argument('--mlp-dropout', type=float, default=0.64, metavar='P',
help='probability of visible units being set to zero')
parser.add_argument('--mlp-save-prefix', type=str, default='../data/grbm_naive_', metavar='PREFIX',
help='prefix to save MLP predictions and targets')
# parse and check params
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
for x, m in (
(args.n_gibbs_steps, 3),
(args.lr, 3),
(args.epochs, 3),
(args.batch_size, 3),
(args.l2, 3),
(args.random_seed, 3),
):
if len(x) == 1:
x *= m
# prepare data (load + scale + split)
print "\nPreparing data ..."
X, y = load_cifar10(mode='train', path=args.data_path)
X = X.astype(np.float32)
X /= 255.
RNG(seed=42).shuffle(X)
RNG(seed=42).shuffle(y)
n_train = min(len(X), args.n_train)
n_val = min(len(X), args.n_val)
X_train = X[:n_train]
X_val = X[-n_val:]
y_train = y[:n_train]
y_val = y[-n_val:]
# remove 1000 least significant singular values
X_train = make_smoothing(X_train, n_train, args)
print X_train.shape
# center and normalize training data
X_s_mean = X_train.mean(axis=0)
X_s_std = X_train.std(axis=0)
mean_path = os.path.join(args.data_path, 'X_s_mean.npy')
std_path = os.path.join(args.data_path, 'X_s_std.npy')
if not os.path.isfile(mean_path):
np.save(mean_path, X_s_mean)
if not os.path.isfile(std_path):
np.save(std_path, X_s_std)
X_train -= X_s_mean
X_train /= X_s_std
X_val -= X_s_mean
X_val /= X_s_std
print "Mean: ({0:.3f}, ...); std: ({1:.3f}, ...)".format(X_train.mean(axis=0)[0],
X_train.std(axis=0)[0])
print "Range: ({0:.3f}, {1:.3f})\n\n".format(X_train.min(), X_train.max())
# pre-train Gaussian RBM
grbm = make_grbm((X_train, X_val), args)
# extract features Q = p_{G-RBM}(h|v=X)
print "\nExtracting features from G-RBM ...\n\n"
Q_train, Q_val = None, None
if not os.path.isdir(args.mrbm_dirpath) or not os.path.isdir(args.dbm_dirpath):
Q_train_path = os.path.join(args.data_path, 'Q_train_cifar_naive.npy')
Q_train = make_rbm_transform(grbm, X_train, Q_train_path)
if not os.path.isdir(args.mrbm_dirpath):
Q_val_path = os.path.join(args.data_path, 'Q_val_cifar_naive.npy')
Q_val = make_rbm_transform(grbm, X_val, Q_val_path)
# pre-train Multinomial RBM (M-RBM)
mrbm = make_mrbm((Q_train, Q_val), args)
# extract features G = p_{M-RBM}(h|v=Q)
print "\nExtracting features from M-RBM ...\n\n"
Q, G = None, None
if not os.path.isdir(args.dbm_dirpath):
Q = Q_train[:args.n_particles]
G_path = os.path.join(args.data_path, 'G_train_cifar_naive.npy')
G = make_rbm_transform(mrbm, Q, G_path)
# jointly train DBM
dbm = make_dbm((X_train, X_val), (grbm, mrbm), (Q, G), args)
# load test data
X_test, y_test = load_cifar10(mode='test', path=args.data_path)
X_test /= 255.
X_test -= X_s_mean
X_test /= X_s_std
# G-RBM discriminative fine-tuning:
# initialize MLP with learned weights,
# add FC layer and train using backprop
print "\nG-RBM Discriminative fine-tuning ...\n\n"
W, hb = None, None
if not args.mlp_no_init:
weights = grbm.get_tf_params(scope='weights')
W = weights['W']
hb = weights['hb']
make_mlp((X_train, y_train), (X_val, y_val), (X_test, y_test),
(W, hb), args)
if __name__ == '__main__':
    main()
 | 41.945882 | 109 | 0.550008 |
4a227e1a437c963a17bf2333309a28c951bd25db | 6,334 | py | Python | hoomd/md/pytest/test_dihedral.py | pabloferz/hoomd-blue | 9a27f63b9243b8a3a04ccd3047f686cb0e12ec31 | ["BSD-3-Clause"] | null | null | null | hoomd/md/pytest/test_dihedral.py | pabloferz/hoomd-blue | 9a27f63b9243b8a3a04ccd3047f686cb0e12ec31 | ["BSD-3-Clause"] | null | null | null | hoomd/md/pytest/test_dihedral.py | pabloferz/hoomd-blue | 9a27f63b9243b8a3a04ccd3047f686cb0e12ec31 | ["BSD-3-Clause"] | null | null | null |
# Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
import hoomd
import pytest
import numpy as np
_harmonic_args = {
'k': [3.0, 10.0, 5.0],
'd': [-1, 1, 1],
'n': [2, 1, 3],
'phi0': [np.pi / 2, np.pi / 4, np.pi / 6]
}
_harmonic_arg_list = [(hoomd.md.dihedral.Harmonic,
dict(zip(_harmonic_args, val)))
for val in zip(*_harmonic_args.values())]
_OPLS_args = {
'k1': [1.0, 0.5, 2.0],
'k2': [1.5, 2.5, 1.0],
'k3': [0.5, 1.5, 0.25],
'k4': [0.75, 1.0, 3.5]
}
_OPLS_arg_list = [(hoomd.md.dihedral.OPLS, dict(zip(_OPLS_args, val)))
for val in zip(*_OPLS_args.values())]
def get_dihedral_and_args():
return _harmonic_arg_list + _OPLS_arg_list
def get_dihedral_args_forces_and_energies():
harmonic_forces = [0.0, 5.0, 1.9411]
harmonic_energies = [3.0, 5.0, 0.0852]
OPLS_forces = [-0.616117, -0.732233, -0.0277282]
OPLS_energies = [2.42678, 2.89645, 5.74372]
harmonic_args_and_vals = []
OPLS_args_and_vals = []
for i in range(3):
harmonic_args_and_vals.append(
(_harmonic_arg_list[i][0], _harmonic_arg_list[i][1],
harmonic_forces[i], harmonic_energies[i]))
OPLS_args_and_vals.append((_OPLS_arg_list[i][0], _OPLS_arg_list[i][1],
OPLS_forces[i], OPLS_energies[i]))
return harmonic_args_and_vals + OPLS_args_and_vals
@pytest.fixture(scope='session')
def dihedral_snapshot_factory(device):
def make_snapshot(d=1.0, phi_deg=45, particle_types=['A'], L=20):
phi_rad = phi_deg * (np.pi / 180)
# the central particles are along the x-axis, so phi is determined from
# the angle in the yz plane.
s = hoomd.Snapshot(device.communicator)
N = 4
if s.communicator.rank == 0:
box = [L, L, L, 0, 0, 0]
s.configuration.box = box
s.particles.N = N
s.particles.types = particle_types
# shift particle positions slightly in z so MPI tests pass
s.particles.position[:] = [
[0.0, d * np.cos(phi_rad / 2), d * np.sin(phi_rad / 2) + 0.1],
[0.0, 0.0, 0.1], [d, 0.0, 0.1],
[d, d * np.cos(phi_rad / 2), -d * np.sin(phi_rad / 2) + 0.1]
]
s.dihedrals.N = 1
s.dihedrals.types = ['dihedral']
s.dihedrals.typeid[0] = 0
s.dihedrals.group[0] = (0, 1, 2, 3)
return s
return make_snapshot
@pytest.mark.parametrize("dihedral_cls, potential_kwargs",
get_dihedral_and_args())
def test_before_attaching(dihedral_cls, potential_kwargs):
dihedral_potential = dihedral_cls()
dihedral_potential.params['dihedral'] = potential_kwargs
for key in potential_kwargs:
np.testing.assert_allclose(dihedral_potential.params['dihedral'][key],
potential_kwargs[key],
rtol=1e-6)
@pytest.mark.parametrize("dihedral_cls, potential_kwargs",
get_dihedral_and_args())
def test_after_attaching(dihedral_snapshot_factory, simulation_factory,
dihedral_cls, potential_kwargs):
snap = dihedral_snapshot_factory(d=0.969, L=5)
sim = simulation_factory(snap)
dihedral_potential = dihedral_cls()
dihedral_potential.params['dihedral'] = potential_kwargs
integrator = hoomd.md.Integrator(dt=0.005)
integrator.forces.append(dihedral_potential)
langevin = hoomd.md.methods.Langevin(kT=1,
filter=hoomd.filter.All(),
alpha=0.1)
integrator.methods.append(langevin)
sim.operations.integrator = integrator
sim.run(0)
for key in potential_kwargs:
np.testing.assert_allclose(dihedral_potential.params['dihedral'][key],
potential_kwargs[key],
rtol=1e-6)
@pytest.mark.parametrize("dihedral_cls, potential_kwargs, force, energy",
get_dihedral_args_forces_and_energies())
def test_forces_and_energies(dihedral_snapshot_factory, simulation_factory,
dihedral_cls, potential_kwargs, force, energy):
phi_deg = 45
phi_rad = phi_deg * (np.pi / 180)
snap = dihedral_snapshot_factory(phi_deg=phi_deg)
sim = simulation_factory(snap)
# the dihedral angle is in yz plane, thus no force along x axis
force_array = force * np.asarray(
[0, np.sin(-phi_rad / 2), np.cos(-phi_rad / 2)])
dihedral_potential = dihedral_cls()
dihedral_potential.params['dihedral'] = potential_kwargs
integrator = hoomd.md.Integrator(dt=0.005)
integrator.forces.append(dihedral_potential)
langevin = hoomd.md.methods.Langevin(kT=1,
filter=hoomd.filter.All(),
alpha=0.1)
integrator.methods.append(langevin)
sim.operations.integrator = integrator
sim.run(0)
sim_energies = sim.operations.integrator.forces[0].energies
sim_forces = sim.operations.integrator.forces[0].forces
if sim.device.communicator.rank == 0:
np.testing.assert_allclose(sum(sim_energies),
energy,
rtol=1e-2,
atol=1e-5)
np.testing.assert_allclose(sim_forces[0],
force_array,
rtol=1e-2,
atol=1e-5)
np.testing.assert_allclose(sim_forces[1],
-1 * force_array,
rtol=1e-2,
atol=1e-5)
np.testing.assert_allclose(sim_forces[2],
[0, -1 * force_array[1], force_array[2]],
rtol=1e-2,
atol=1e-5)
np.testing.assert_allclose(sim_forces[3],
[0, force_array[1], -1 * force_array[2]],
rtol=1e-2,
atol=1e-5)
| 37.258824 | 79 | 0.558099 |
4a227eee04223bc662783a7b415ed10c4efa817b | 597 | py | Python | pylearn2/devtools/tests/test_via_pyflakes.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | ["BSD-3-Clause"] | 3 | 2016-01-23T10:18:39.000Z | 2019-02-28T06:22:45.000Z | pylearn2/devtools/tests/test_via_pyflakes.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | ["BSD-3-Clause"] | null | null | null | pylearn2/devtools/tests/test_via_pyflakes.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | ["BSD-3-Clause"] | null | null | null |
from pylearn2.devtools.run_pyflakes import run_pyflakes
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
def test_via_pyflakes():
d = run_pyflakes(True)
if len(d.keys()) != 0:
print 'Errors detected by pyflakes'
for key in d.keys():
print key+':'
for l in d[key].split('\n'):
print '\t',l
raise AssertionError("You have errors detected by pyflakes")
| 31.421053 | 68 | 0.656616 |
4a227f2a672ed61a67015f5f1cdadad23101d97f | 11,535 | py | Python | qa/pull-tester/rpc-tests.py | uscoin-project/uscoin | 6a459cc884133ba881c7ededce95320466bd4c07 | ["MIT"] | null | null | null | qa/pull-tester/rpc-tests.py | uscoin-project/uscoin | 6a459cc884133ba881c7ededce95320466bd4c07 | ["MIT"] | 1 | 2018-07-18T18:25:36.000Z | 2018-07-19T14:19:43.000Z | qa/pull-tester/rpc-tests.py | uscoin-project/uscoin | 6a459cc884133ba881c7ededce95320466bd4c07 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "USCOIND" not in os.environ:
os.environ["USCOIND"] = BUILDDIR + '/src/uscoind' + EXEEXT
if "USCOINCLI" not in os.environ:
os.environ["USCOINCLI"] = BUILDDIR + '/src/uscoin-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'p2p-fullblocktest.py',
'walletbackup.py',
'bip68-112-113-p2p.py',
'wallet.py',
'wallet-hd.py',
'wallet-dump.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'nodehandling.py',
'reindex.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'p2p-versionbits-warning.py',
'p2p-segwit.py',
'segwit.py',
'importprunedfunds.py',
'signmessages.py',
'p2p-compactblocks.py',
'nulldummy.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'rpcbind_test.py',
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
'p2p-feefilter.py',
'pruning.py', # leave pruning last as it takes a REALLY long time
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print(stdout)
print('stderr:\n' if not stderr == '' else '', stderr)
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
Trigger the testscrips passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| 32.492958 | 133 | 0.617945 |
4a227f42e87ad0fd995b916d270fd428a4264263 | 5,262 | py | Python | inference_engine/efficientdet_pytorch/lib/efficientdet_folder.py | rohit0906/Monk_Object_Detection | aa96f0fa4629e12e2730164a571ea41aa0ee2278 | ["Apache-2.0"] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | inference_engine/efficientdet_pytorch/lib/efficientdet_folder.py | rohit0906/Monk_Object_Detection | aa96f0fa4629e12e2730164a571ea41aa0ee2278 | ["Apache-2.0"] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | inference_engine/efficientdet_pytorch/lib/efficientdet_folder.py | rohit0906/Monk_Object_Detection | aa96f0fa4629e12e2730164a571ea41aa0ee2278 | ["Apache-2.0"] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z |
# Author: Zylo117
"""
Simple Inference Script of EfficientDet-Pytorch
"""
import os
import time
import torch
from torch.backends import cudnn
from matplotlib import colors
from backbone import EfficientDetBackbone
import cv2
import numpy as np
from efficientdet.utils import BBoxTransform, ClipBoxes
from utils.utils import preprocess, invert_affine, postprocess, STANDARD_COLORS, standard_to_bgr, get_index_label, plot_one_box
from tqdm import tqdm
compound_coef = 7
force_input_size = None # set None to use default size
img_folder = "efficientdet_d7/night_traffic_signal_sign"
# replace this part with your project's anchor config
anchor_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
anchor_scales = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
threshold = 0.2
iou_threshold = 0.2
use_cuda = True
use_float16 = False
cudnn.fastest = True
cudnn.benchmark = True
obj_list = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', '', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', '', 'backpack', 'umbrella', '', '', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', '', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut',
'cake', 'chair', 'couch', 'potted plant', 'bed', '', 'dining table', '', '', 'toilet', '', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
'refrigerator', '', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
'toothbrush']
color_list = standard_to_bgr(STANDARD_COLORS)
# tf bilinear interpolation is different from any other's, just make do
input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
input_size = input_sizes[compound_coef] if force_input_size is None else force_input_size
model = EfficientDetBackbone(compound_coef=compound_coef, num_classes=len(obj_list),
ratios=anchor_ratios, scales=anchor_scales)
model.load_state_dict(torch.load(f'weights/efficientdet-d{compound_coef}.pth'))
model.requires_grad_(False)
model.eval()
if use_cuda:
model = model.cuda()
if use_float16:
model = model.half()
def display(preds, imgs, img_path, imshow=True, imwrite=False):
for i in range(len(imgs)):
if len(preds[i]['rois']) == 0:
continue
for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)  # plain int: np.int was removed in newer NumPy
obj = obj_list[preds[i]['class_ids'][j]]
score = float(preds[i]['scores'][j])
plot_one_box(imgs[i], [x1, y1, x2, y2], label=obj,score=score,color=color_list[get_index_label(obj, obj_list)])
if imshow:
cv2.imshow('img', imgs[i])
cv2.waitKey(0)
if imwrite:
write_path = img_path.split(".")[0] + "_predicted.jpg";
cv2.imwrite(write_path, imgs[i]);
img_lists = os.listdir(img_folder);
for i in tqdm(range(len(img_lists))):
img_path = img_folder + "/" + img_lists[i];
ori_imgs, framed_imgs, framed_metas = preprocess(img_path, max_size=input_size)
if use_cuda:
x = torch.stack([torch.from_numpy(fi).cuda() for fi in framed_imgs], 0)
else:
x = torch.stack([torch.from_numpy(fi) for fi in framed_imgs], 0)
x = x.to(torch.float32 if not use_float16 else torch.float16).permute(0, 3, 1, 2)
with torch.no_grad():
features, regression, classification, anchors = model(x)
regressBoxes = BBoxTransform()
clipBoxes = ClipBoxes()
out = postprocess(x,
anchors, regression, classification,
regressBoxes, clipBoxes,
threshold, iou_threshold)
out = invert_affine(framed_metas, out)
display(out, ori_imgs, img_path, imshow=False, imwrite=True)
'''
print('running speed test...')
with torch.no_grad():
print('test1: model inferring and postprocessing')
print('inferring image for 10 times...')
t1 = time.time()
for _ in range(10):
_, regression, classification, anchors = model(x)
out = postprocess(x,
anchors, regression, classification,
regressBoxes, clipBoxes,
threshold, iou_threshold)
out = invert_affine(framed_metas, out)
t2 = time.time()
tact_time = (t2 - t1) / 10
print(f'{tact_time} seconds, {1 / tact_time} FPS, @batch_size 1')
# uncomment this if you want a extreme fps test
# print('test2: model inferring only')
# print('inferring images for batch_size 32 for 10 times...')
# t1 = time.time()
# x = torch.cat([x] * 32, 0)
# for _ in range(10):
# _, regression, classification, anchors = model(x)
#
# t2 = time.time()
# tact_time = (t2 - t1) / 10
# print(f'{tact_time} seconds, {32 / tact_time} FPS, @batch_size 32')
'''
| 35.315436 | 127 | 0.621437 |
4a227fc60809b432272adaccdfcb403da15956fa | 1,578 | py | Python | src/microgp/fitness/lexicographic.py | leonardogian/microgp4 | 69f06791e4f29373c06a4aabfbd2229e766f4f2c | [
"Apache-2.0"
] | null | null | null | src/microgp/fitness/lexicographic.py | leonardogian/microgp4 | 69f06791e4f29373c06a4aabfbd2229e766f4f2c | [
"Apache-2.0"
] | null | null | null | src/microgp/fitness/lexicographic.py | leonardogian/microgp4 | 69f06791e4f29373c06a4aabfbd2229e766f4f2c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#############################################################################
# __________ #
# __ __/ ____/ __ \__ __ This file is part of MicroGP4 v1.0 "Kiwi" #
# / / / / / __/ /_/ / // / (!) by Giovanni Squillero and Alberto Tonda #
# / /_/ / /_/ / ____/ // /_ https://github.com/squillero/microgp4 #
# \__ /\____/_/ /__ __/ #
# /_/ --MicroGP4-- /_/ "You don't need a big goal, be μ-ambitious!!" #
# #
#############################################################################
# Copyright 2020 Giovanni Squillero and Alberto Tonda
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from .fitnesstuple import FitnessTuple
class Lexicographic(FitnessTuple):
"""Tuples able to smoothly handle single- and multi-value fitnesses"""
def __gt__(self, other: "Lexicographic") -> bool:
return tuple.__gt__(self, other)
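# Illustrative comparison semantics (not part of the original module): because
# Lexicographic defers to tuple.__gt__, fitness values are compared element by
# element from the left, e.g. (2, 0) > (1, 9) and (1, 3) > (1, 2), so the first
# objective dominates and later objectives only break ties.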
| 45.085714 | 77 | 0.52725 |
4a227fdfab6d9653ac3733e1f927d7aa1d248b2e | 903 | py | Python | Birnn_Transformer/ncc/tokenizers/utils.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | 71 | 2020-12-04T02:18:13.000Z | 2022-03-30T15:19:50.000Z | ncc/tokenizers/utils.py | hrshy0629/naturalcc | 9c3329dd8387c8242deb52bf590ebe3ac795f8de | [
"MIT"
] | 4 | 2021-03-10T17:48:50.000Z | 2022-03-13T10:42:22.000Z | ncc/tokenizers/utils.py | hrshy0629/naturalcc | 9c3329dd8387c8242deb52bf590ebe3ac795f8de | [
"MIT"
] | 11 | 2020-12-09T12:17:44.000Z | 2022-03-30T09:02:13.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ncc import tokenizers
def get_whole_word_mask(args, dictionary):
bpe = tokenizers.build_bpe(args)
if bpe is not None:
def is_beginning_of_word(i):
if i < dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = dictionary[i]
if tok.startswith('madeupword'):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(list(
map(is_beginning_of_word, range(len(dictionary)))
))
return mask_whole_words
return None
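# Illustrative usage sketch (not part of the original module; `task` and the
# masking step are assumptions about the surrounding NaturalCC code):
#
#     mask_whole_words = get_whole_word_mask(args, task.source_dictionary)
#     if mask_whole_words is not None:
#         # mask_whole_words[i] == 1 iff token i starts a word, so BPE
#         # continuation pieces can be masked together with their word start.
#         ...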
| 30.1 | 67 | 0.621262 |
4a22807dd6ab83e52ea43bc069b0d759658061d1 | 9,263 | py | Python | lib/galaxy/util/compression_utils.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 47 | 2015-10-21T23:30:30.000Z | 2022-03-09T06:51:32.000Z | lib/galaxy/util/compression_utils.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 78 | 2019-01-18T08:12:49.000Z | 2022-03-13T08:56:41.000Z | lib/galaxy/util/compression_utils.py | rhpvorderman/galaxy | 178015f8eff0b0c7a59c0d6756658f6428222837 | [
"CC-BY-3.0"
] | 35 | 2015-10-30T13:09:40.000Z | 2021-05-03T23:17:46.000Z | import gzip
import io
import logging
import os
import tarfile
import zipfile
from galaxy.util.path import safe_relpath
from .checkers import (
bz2,
is_bz2,
is_gzip
)
log = logging.getLogger(__name__)
def get_fileobj(filename, mode="r", compressed_formats=None):
"""
Returns a fileobj. If the file is compressed, return an appropriate file
reader. In text mode, always use 'utf-8' encoding.
:param filename: path to file that should be opened
:param mode: mode to pass to opener
:param compressed_formats: list of allowed compressed file formats among
'bz2', 'gzip' and 'zip'. If left to None, all 3 formats are allowed
"""
return get_fileobj_raw(filename, mode, compressed_formats)[1]
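# Illustrative usage sketch (not part of the original module; the file name is
# hypothetical):
#
#     with get_fileobj('/tmp/table.tsv.gz', mode='r') as fh:
#         header = fh.readline()   # transparently decompressed, utf-8 text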
def get_fileobj_raw(filename, mode="r", compressed_formats=None):
if compressed_formats is None:
compressed_formats = ['bz2', 'gzip', 'zip']
# Remove 't' from mode, which may cause an error for compressed files
mode = mode.replace('t', '')
# 'U' mode is deprecated, we open in 'r'.
if mode == 'U':
mode = 'r'
compressed_format = None
if 'gzip' in compressed_formats and is_gzip(filename):
fh = gzip.GzipFile(filename, mode)
compressed_format = 'gzip'
elif 'bz2' in compressed_formats and is_bz2(filename):
fh = bz2.BZ2File(filename, mode)
compressed_format = 'bz2'
elif 'zip' in compressed_formats and zipfile.is_zipfile(filename):
# Return fileobj for the first file in a zip file.
# 'b' is not allowed in the ZipFile mode argument
# since it always opens files in binary mode.
# For emulating text mode, we will be returning the binary fh in a
# TextIOWrapper.
zf_mode = mode.replace('b', '')
with zipfile.ZipFile(filename, zf_mode) as zh:
fh = zh.open(zh.namelist()[0], zf_mode)
compressed_format = 'zip'
elif 'b' in mode:
return compressed_format, open(filename, mode)
else:
return compressed_format, open(filename, mode, encoding='utf-8')
if 'b' not in mode:
return compressed_format, io.TextIOWrapper(fh, encoding='utf-8')
else:
return compressed_format, fh
def file_iter(fname, sep=None):
"""
This generator iterates over a file and yields its lines
splitted via the C{sep} parameter. Skips empty lines and lines starting with
the C{#} character.
>>> lines = [ line for line in file_iter(__file__) ]
>>> len(lines) != 0
True
"""
with get_fileobj(fname) as fh:
for line in fh:
if line and line[0] != '#':
yield line.split(sep)
class CompressedFile:
@staticmethod
def can_decompress(file_path):
return tarfile.is_tarfile(file_path) or zipfile.is_zipfile(file_path)
def __init__(self, file_path, mode='r'):
if tarfile.is_tarfile(file_path):
self.file_type = 'tar'
elif zipfile.is_zipfile(file_path) and not file_path.endswith('.jar'):
self.file_type = 'zip'
self.file_name = os.path.splitext(os.path.basename(file_path))[0]
if self.file_name.endswith('.tar'):
self.file_name = os.path.splitext(self.file_name)[0]
self.type = self.file_type
method = f'open_{self.file_type}'
if hasattr(self, method):
self.archive = getattr(self, method)(file_path, mode)
else:
raise NameError(f'File type {self.file_type} specified, no open method found.')
@property
def common_prefix_dir(self):
"""
Get the common prefix directory for all the files in the archive, if any.
Returns '' if the archive contains multiple files and/or directories at
the root of the archive.
"""
contents = self.getmembers()
common_prefix = ''
if len(contents) > 1:
common_prefix = os.path.commonprefix([self.getname(item) for item in contents])
# If the common_prefix does not end with a slash, check that is a
# directory and all other files are contained in it
if len(common_prefix) >= 1 and not common_prefix.endswith(os.sep) and self.isdir(self.getmember(common_prefix)) \
and all(self.getname(item).startswith(common_prefix + os.sep) for item in contents if self.isfile(item)):
common_prefix += os.sep
if not common_prefix.endswith(os.sep):
common_prefix = ''
return common_prefix
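        # For example (illustrative): an archive holding 'pkg/a.txt' and
        # 'pkg/sub/b.txt' yields 'pkg' + os.sep, while one holding 'a.txt'
        # and 'b.txt' at its root yields ''.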
def extract(self, path):
'''Determine the path to which the archive should be extracted.'''
contents = self.getmembers()
extraction_path = path
common_prefix_dir = self.common_prefix_dir
if len(contents) == 1:
# The archive contains a single file, return the extraction path.
if self.isfile(contents[0]):
extraction_path = os.path.join(path, self.file_name)
if not os.path.exists(extraction_path):
os.makedirs(extraction_path)
self.archive.extractall(extraction_path, members=self.safemembers())
else:
if not common_prefix_dir:
extraction_path = os.path.join(path, self.file_name)
if not os.path.exists(extraction_path):
os.makedirs(extraction_path)
self.archive.extractall(extraction_path, members=self.safemembers())
# Since .zip files store unix permissions separately, we need to iterate through the zip file
# and set permissions on extracted members.
if self.file_type == 'zip':
for zipped_file in contents:
filename = self.getname(zipped_file)
absolute_filepath = os.path.join(extraction_path, filename)
external_attributes = self.archive.getinfo(filename).external_attr
# The 2 least significant bytes are irrelevant, the next two contain unix permissions.
unix_permissions = external_attributes >> 16
if unix_permissions != 0:
if os.path.exists(absolute_filepath):
os.chmod(absolute_filepath, unix_permissions)
else:
log.warning(f"Unable to change permission on extracted file '{absolute_filepath}' as it does not exist")
return os.path.abspath(os.path.join(extraction_path, common_prefix_dir))
def safemembers(self):
members = self.archive
common_prefix_dir = self.common_prefix_dir
if self.file_type == "tar":
for finfo in members:
if not safe_relpath(finfo.name):
raise Exception(f"Path '{finfo.name}' is blocked (illegal path).")
if finfo.issym() or finfo.islnk():
link_target = os.path.join(os.path.dirname(finfo.name), finfo.linkname)
if not safe_relpath(link_target) or not os.path.normpath(link_target).startswith(common_prefix_dir):
raise Exception(f"Link '{finfo.name}' to '{finfo.linkname}' is blocked.")
yield finfo
elif self.file_type == "zip":
for name in members.namelist():
if not safe_relpath(name):
raise Exception(f"{name} is blocked (illegal path).")
yield name
def getmembers_tar(self):
return self.archive.getmembers()
def getmembers_zip(self):
return self.archive.infolist()
def getname_tar(self, item):
return item.name
def getname_zip(self, item):
return item.filename
def getmember(self, name):
for member in self.getmembers():
if self.getname(member) == name:
return member
def getmembers(self):
return getattr(self, f'getmembers_{self.type}')()
def getname(self, member):
return getattr(self, f'getname_{self.type}')(member)
def isdir(self, member):
return getattr(self, f'isdir_{self.type}')(member)
def isdir_tar(self, member):
return member.isdir()
def isdir_zip(self, member):
if member.filename.endswith(os.sep):
return True
return False
def isfile(self, member):
if not self.isdir(member):
return True
return False
def open_tar(self, filepath, mode):
return tarfile.open(filepath, mode, errorlevel=0)
def open_zip(self, filepath, mode):
return zipfile.ZipFile(filepath, mode)
def zipfile_ok(self, path_to_archive):
"""
This function is a bit pedantic and not functionally necessary. It checks whether there is
no file pointing outside of the extraction, because ZipFile.extractall() has some potential
security holes. See python zipfile documentation for more details.
"""
basename = os.path.realpath(os.path.dirname(path_to_archive))
zip_archive = zipfile.ZipFile(path_to_archive)
for member in zip_archive.namelist():
member_path = os.path.realpath(os.path.join(basename, member))
if not member_path.startswith(basename):
return False
return True
| 39.58547 | 128 | 0.628738 |
4a22813e872fc9b780ed8df8f183fb6f634683d2 | 5,490 | py | Python | lib/models/resnet.py | Jinming-Su/SGNet | fcf35edaf332c1a4e2713acad5a0fc0e21509c3e | [
"MIT"
] | 13 | 2021-10-15T15:14:45.000Z | 2022-03-09T00:22:55.000Z | lib/models/resnet.py | Jinming-Su/SGNet | fcf35edaf332c1a4e2713acad5a0fc0e21509c3e | [
"MIT"
] | 4 | 2021-10-17T09:04:20.000Z | 2022-03-25T06:43:00.000Z | lib/models/resnet.py | Jinming-Su/SGNet | fcf35edaf332c1a4e2713acad5a0fc0e21509c3e | [
"MIT"
] | 2 | 2021-11-17T11:31:35.000Z | 2021-11-29T06:50:35.000Z | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.utils.model_zoo as modelzoo
#from modules import InPlaceABNSync as BatchNorm2d
from torch.nn import BatchNorm2d
resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
resnet50_url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
resnet101_url = 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'
class Bottleneck(nn.Module):
def __init__(self,
in_chan,
out_chan,
stride = 1,
stride_at_1x1 = False,
dilation = 1,
*args, **kwargs):
super(Bottleneck, self).__init__(*args, **kwargs)
stride1x1, stride3x3 = (stride, 1) if stride_at_1x1 else (1, stride)
assert out_chan % 4 == 0
mid_chan = int(out_chan / 4)
self.conv1 = nn.Conv2d(in_chan,
mid_chan,
kernel_size = 1,
stride = stride1x1,
bias = False)
self.bn1 = BatchNorm2d(mid_chan)
self.conv2 = nn.Conv2d(mid_chan,
mid_chan,
kernel_size = 3,
stride = stride3x3,
padding = dilation,
dilation = dilation,
bias = False)
self.bn2 = BatchNorm2d(mid_chan)
self.conv3 = nn.Conv2d(mid_chan,
out_chan,
kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(out_chan)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
if in_chan != out_chan or stride != 1:
self.downsample = nn.Sequential(
nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_chan))
self.init_weight()
def forward(self, x):
residual = self.conv1(x)
residual = self.bn1(residual)
residual = self.relu(residual)
residual = self.conv2(residual)
residual = self.bn2(residual)
residual = self.relu(residual)
residual = self.conv3(residual)
residual = self.bn3(residual)
if self.downsample == None:
inten = x
else:
inten = self.downsample(x)
out = residual + inten
out = self.relu(out)
return out
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if not ly.bias is None: nn.init.constant_(ly.bias, 0)
def create_stage(in_chan, out_chan, b_num, stride=1, dilation=1):
assert out_chan % 4 == 0
mid_chan = out_chan / 4
blocks = [Bottleneck(in_chan, out_chan, stride=stride, dilation=dilation),]
for i in range(1, b_num):
blocks.append(Bottleneck(out_chan, out_chan, stride=1, dilation=dilation))
return nn.Sequential(*blocks)
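# For example (illustrative): create_stage(256, 512, 4, stride=2) builds the
# ResNet-50 "layer2" group -- four Bottlenecks, the first of which downsamples
# 256 -> 512 channels with stride 2, the rest keeping 512 channels.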
class Resnet50(nn.Module):
def __init__(self, stride=32, *args, **kwargs):
super(Resnet50, self).__init__()
assert stride in (8, 16, 32)
dils = [1, 1] if stride==32 else [el*(16//stride) for el in (1, 2)]
strds = [2 if el==1 else 1 for el in dils]
self.conv1 = nn.Conv2d(
3,
64,
kernel_size = 7,
stride = 2,
padding = 3,
bias = False)
self.bn1 = BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(
kernel_size = 3,
stride = 2,
padding = 1,
dilation = 1,
ceil_mode = False)
#[3, 4, 6, 3]
self.layer1 = create_stage(64, 256, 3, stride=1, dilation=1)
self.layer2 = create_stage(256, 512, 4, stride=2, dilation=1)
self.layer3 = create_stage(512, 1024, 6, stride=strds[0], dilation=dils[0])
self.layer4 = create_stage(1024, 2048, 3, stride=strds[1], dilation=dils[1])
self.init_weight()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
feat4 = self.layer1(x)
feat8 = self.layer2(feat4)
feat16 = self.layer3(feat8)
feat32 = self.layer4(feat16)
return feat4, feat8, feat16, feat32
def init_weight(self):
#state_dict = modelzoo.load_url(resnet50_url)
state_dict = torch.load('resnet50-19c8e357.pth')
self_state_dict = self.state_dict()
for k, v in self_state_dict.items():
if k in state_dict.keys():
self_state_dict.update({k: state_dict[k]})
self.load_state_dict(self_state_dict)
def get_params(self):
bn_params = []
non_bn_params = []
for name, param in self.named_parameters():
if 'bn' in name or 'downsample.1' in name:
bn_params.append(param)
else:
                non_bn_params.append(param)  # was bn_params.append(param): non-BN params were never collected
return bn_params, non_bn_params
if __name__ == "__main__":
# layer1 = create_stage(64, 256, 3, 1, 1)
# layer2 = create_stage(256, 512, 4, 2, 1)
# layer3 = create_stage(512, 1024, 6, 1, 2)
# layer4 = create_stage(1024, 2048, 3, 1, 4)
# print(layer4)
    resnet = Resnet50(stride=8)  # Resnet50 is the only backbone defined here; stride=8 gives the dilated variant the old name referred to
inten = torch.randn(1, 3, 224, 224)
_, _, _, out = resnet(inten)
print(out.size())
| 31.734104 | 87 | 0.565209 |
4a22819678ed1503f73497a4e814966cec1af233 | 2,757 | py | Python | var/spack/repos/builtin/packages/channelflow/package.py | whitfin/spack | aabd2be31a511d0e00c1017f7311a421659319d9 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/channelflow/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/channelflow/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Channelflow(CMakePackage):
"""Channelflow is a software system for numerical analysis of the
incompressible fluid flow in channel geometries, written in C++.
"""
homepage = 'https://github.com/epfl-ecps/channelflow'
url = 'https://github.com/epfl-ecps/channelflow.git'
version(
'develop',
git='https://github.com/epfl-ecps/channelflow.git',
branch='master'
)
variant('shared', default=True, description='Build shared libs')
variant('mpi', default=True, description='Enable MPI parallelism')
variant('hdf5', default=True, description='Enable support for HDF5 I/O')
variant(
'netcdf', default='serial', values=('none', 'serial', 'parallel'),
multi=False, description='Level of support for NetCDF I/O'
)
variant('python', default=False, description='Build python bindings')
depends_on('eigen')
depends_on('fftw')
# MPI related constraints
depends_on('mpi', when='+mpi')
depends_on('fftw+mpi', when='+mpi')
# Support for different I/O formats
depends_on('hdf5+cxx', when='+hdf5')
depends_on('netcdf', when='netcdf=serial')
depends_on('netcdf+mpi', when='netcdf=parallel')
# Python bindings
depends_on('boost+python', when='+python')
conflicts('~mpi', when='netcdf=parallel', msg='Parallel NetCDF requires MPI')
conflicts(
'+mpi', when='+python',
msg='Building python bindings is possible only for the serial code'
)
conflicts('~mpi', when='^mpi',
msg='There should be no MPI in the DAG when ~mpi is active')
def cmake_args(self):
spec = self.spec
on_or_off = lambda predicate: 'ON' if predicate else 'OFF'
args = [
'-DBUILD_SHARED_LIBS:BOOL={0}'.format(
on_or_off('+shared' in spec)
),
'-DUSE_MPI:BOOL={0}'.format(on_or_off('+mpi' in spec)),
'-DWITH_HDF5CXX:BOOL={0}'.format(on_or_off('+hdf5' in spec)),
'-DWITH_PYTHON:BOOL={0}'.format(on_or_off('+python' in spec))
]
netcdf_str = {
'none': 'OFF',
'serial': 'Serial',
'parallel': 'Parallel'
}
args.append('-DWITH_NETCDF:STRING={0}'.format(
netcdf_str[spec.variants['netcdf'].value]
))
# Set an MPI compiler for parallel builds
if '+mpi' in spec:
args.append(
'-DCMAKE_CXX_COMPILER:PATH={0}'.format(spec['mpi'].mpicxx)
)
return args
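        # Illustrative mapping (not part of the original package): a spec such as
        # 'channelflow@develop +mpi +hdf5 netcdf=parallel' would produce, among
        # others, -DUSE_MPI:BOOL=ON, -DWITH_HDF5CXX:BOOL=ON,
        # -DWITH_NETCDF:STRING=Parallel and -DCMAKE_CXX_COMPILER:PATH=<mpicxx>.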
| 32.05814 | 81 | 0.611172 |
4a2281f32d6de0ce199b994cd8084a7866d36b4d | 22,907 | py | Python | src/lib/thirdparty/logutils/dictconfig.py | gavin-anders/SSRFScanner | 68546de8cc1ea2e6141cf5eb7363ad448a07a138 | [
"Apache-2.0"
] | 1 | 2020-02-13T09:51:13.000Z | 2020-02-13T09:51:13.000Z | src/lib/thirdparty/logutils/dictconfig.py | gavin-anders/SSRFScanner | 68546de8cc1ea2e6141cf5eb7363ad448a07a138 | [
"Apache-2.0"
] | 7 | 2019-10-06T14:48:03.000Z | 2021-09-08T01:20:06.000Z | src/lib/thirdparty/logutils/dictconfig.py | gavin-anders/SSRFScanner | 68546de8cc1ea2e6141cf5eb7363ad448a07a138 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2009-2013 Vinay Sajip. See LICENSE.txt for details.
#
import logging.handlers
import re
import sys
import types
try:
basestring
except NameError:
basestring = str
try:
StandardError
except NameError:
StandardError = Exception
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
#
# This function is defined in logging only in recent versions of Python
#
try:
from logging import _checkLevel
except ImportError:
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in logging._levelNames:
raise ValueError('Unknown level: %r' % level)
rv = logging._levelNames[level]
else:
raise TypeError('Level not an integer or a '
'valid string: %r' % level)
return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
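    # For example (illustrative): 'ext://sys.stderr' resolves to the sys.stderr
    # object via ext_convert/resolve, while 'cfg://handlers.console' returns
    # self.config['handlers']['console'] via cfg_convert.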
# We might want to use a different one, e.g. importlib
importer = __import__
"Allows the importer to be redefined."
def __init__(self, config):
"""
Initialise an instance with the specified configuration
dictionary.
"""
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, basestring):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if isinstance(c, basestring):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
def named_handlers_supported():
major, minor = sys.version_info[:2]
if major == 2:
result = minor >= 7
elif major == 3:
result = minor >= 2
else:
result = (major > 3)
return result
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
# incremental handler config only if handler name
# ties in to logging._handlers (Python 2.7, 3.2+)
if named_handlers_supported():
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(_checkLevel(level))
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = sorted(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError:
te = sys.exc_info()[1]
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if isinstance(c, basestring):
c = self.resolve(c)
factory = c
else:
klass = self.resolve(config.pop('class'))
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
config['target'] = self.config['handlers'][config['target']]
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError:
te = sys.exc_info()[1]
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(_checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError:
e = sys.exc_info()[1]
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(_checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
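# Illustrative minimal configuration (not part of the original module):
#
#     dictConfig({
#         'version': 1,
#         'formatters': {'brief': {'format': '%(levelname)s:%(name)s:%(message)s'}},
#         'handlers': {'console': {'class': 'logging.StreamHandler',
#                                  'formatter': 'brief',
#                                  'level': 'INFO'}},
#         'root': {'handlers': ['console'], 'level': 'INFO'},
#     })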
| 39.907666 | 80 | 0.509495 |
4a22838df8198f30086621a6a412792e6207fc6f | 2,680 | py | Python | tests/main_test.py | silnrsi/glyphsLib | fc9ac286874e30130679430b028a173062c311a0 | [
"Apache-2.0"
] | 1 | 2019-01-19T05:50:30.000Z | 2019-01-19T05:50:30.000Z | tests/main_test.py | DalavanCloud/glyphsLib | fc9ac286874e30130679430b028a173062c311a0 | [
"Apache-2.0"
] | null | null | null | tests/main_test.py | DalavanCloud/glyphsLib | fc9ac286874e30130679430b028a173062c311a0 | [
"Apache-2.0"
] | 1 | 2019-01-19T05:50:14.000Z | 2019-01-19T05:50:14.000Z | # coding=UTF-8
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import unittest
import subprocess
import os
import glob
import glyphsLib.__main__
import glyphsLib.parser
def test_glyphs_main_masters(tmpdir):
"""Tests the main of glyphsLib and also the `build_masters` function
that `fontmake` uses.
"""
filename = os.path.join(
os.path.dirname(__file__), 'data/GlyphsUnitTestSans.glyphs')
master_dir = os.path.join(str(tmpdir), 'master_ufos_test')
glyphsLib.__main__.main(['-g', filename, '-m', master_dir])
assert glob.glob(master_dir + '/*.ufo')
def test_glyphs_main_instances(tmpdir):
filename = os.path.join(
os.path.dirname(__file__), 'data/GlyphsUnitTestSans.glyphs')
master_dir = os.path.join(str(tmpdir), 'master_ufos_test')
inst_dir = os.path.join(str(tmpdir), 'inst_ufos_test')
glyphsLib.__main__.main(['-g', filename, '-m', master_dir, '-n', inst_dir])
assert glob.glob(master_dir + '/*.ufo')
assert glob.glob(inst_dir + '/*.ufo')
def test_glyphs_main_instances_relative_dir(tmpdir):
filename = os.path.join(
os.path.dirname(__file__), 'data/GlyphsUnitTestSans.glyphs')
master_dir = 'master_ufos_test'
inst_dir = 'inst_ufos_test'
cwd = os.getcwd()
try:
os.chdir(str(tmpdir))
glyphsLib.__main__.main(
['-g', filename, '-m', master_dir, '-n', inst_dir])
assert glob.glob(master_dir + '/*.ufo')
assert glob.glob(inst_dir + '/*.ufo')
finally:
os.chdir(cwd)
def test_parser_main(capsys):
"""This is both a test for the "main" functionality of glyphsLib.parser
and for the round-trip of GlyphsUnitTestSans.glyphs.
"""
filename = os.path.join(
os.path.dirname(__file__), 'data/GlyphsUnitTestSans.glyphs')
with open(filename) as f:
expected = f.read()
glyphsLib.parser.main([filename])
out, _err = capsys.readouterr()
assert expected == out, 'The roundtrip should output the .glyphs file unmodified.'
| 31.904762 | 86 | 0.691045 |
4a2283ef1e36be560b76ff055806a9cbf0d9abe3 | 499 | py | Python | kirjoitukset/urls.py | ntuomas/nettisivu-projekti | c0f74abaa22c7630dc439e924e20f208dec958e9 | [
"MIT"
] | null | null | null | kirjoitukset/urls.py | ntuomas/nettisivu-projekti | c0f74abaa22c7630dc439e924e20f208dec958e9 | [
"MIT"
] | null | null | null | kirjoitukset/urls.py | ntuomas/nettisivu-projekti | c0f74abaa22c7630dc439e924e20f208dec958e9 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.KirjoitusListView.as_view(), name='koti'),
path('kirjoitus/<int:pk>', views.KirjoitusDetailView.as_view(), name='yksityiskohdat'),
path('kirjoitus/uusi/', views.KirjoitusCreateView.as_view(), name='uusi_kirjoitus'),
path('kirjoitus/<int:pk>/muokkaa/', views.KirjoitusUpdateView.as_view(), name='muokkaa'),
path('kirjoitus/<int:pk>/poista/', views.KirjoitusDeleteView.as_view(), name='poista'),
]
| 38.384615 | 93 | 0.713427 |
4a22842f59fc23f8aa421078639bdc7359d2eb3d | 1,357 | py | Python | examples/tutorial.py | johertrich/Inertial-Stochastic-PALM | 8ef7be8d741c01ecd23e049d976f28f468d301cb | [
"MIT"
] | 1 | 2021-06-23T04:00:50.000Z | 2021-06-23T04:00:50.000Z | examples/tutorial.py | johertrich/Inertial-Stochastic-PALM | 8ef7be8d741c01ecd23e049d976f28f468d301cb | [
"MIT"
] | null | null | null | examples/tutorial.py | johertrich/Inertial-Stochastic-PALM | 8ef7be8d741c01ecd23e049d976f28f468d301cb | [
"MIT"
] | null | null | null | # This code belongs to the paper
#
# J. Hertrich and G. Steidl.
# Inertial Stochastic PALM (iSPALM) and Applications in Machine Learning.
# ArXiv preprint arXiv:2005.02204, 2020.
#
# Please cite the paper, if you use the code.
from palm_algs import *
# implement model functions
def H(X,batch):
diffs1=tf.add(batch,-X[0])
diffs2=tf.add(batch,-X[1])
return tf.reduce_sum(diffs2**2)-tf.reduce_sum(diffs1**2)
def prox_1(x,lam):
my_norm=tf.sqrt(tf.reduce_sum(x**2))
if my_norm<1:
return x
return x/my_norm
def prox_2(x,lam):
return tf.multiply(tf.math.sign(x),tf.math.maximum(tf.math.abs(x)-1/lam,0))
def f_1(x):
my_norm=tf.math.sqrt(tf.reduce_sum(x**2))
if my_norm>1.:
a=tf.constant(np.inf,dtype=tf.float32)
else:
a=tf.constant(0,dtype=tf.float32)
return a
def f_2(x):
return tf.reduce_sum(tf.math.abs(x))
# initialization
d=5
inits=[np.zeros(d).astype(np.float32),np.zeros(d).astype(np.float32)]
n=10000
data=np.random.normal(loc=0.5,size=[n,d]).astype(np.float32)
# model declaration
model=PALM_Model(inits)
model.H=H
model.prox_funs=[prox_1,prox_2]
model.f=[f_1,f_2]
# run algorithm
method='iSPALM-SARAH'
optim=PALM_Optimizer(model,data=data,method=method)
optim.optimize(EPOCHS=10)
# print result
print('X_1='+str(model.X[0].numpy())+', X_2='+str(model.X[1].numpy()))
| 23.807018 | 79 | 0.687546 |
4a228458e057a71200fbb6451ced93aba750f67d | 6,044 | py | Python | PCRC-MCDR/utils/pycraft/networking/packets/packet.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
] | 2 | 2020-04-17T13:32:53.000Z | 2020-04-28T10:16:28.000Z | PCRC-MCDR/utils/pycraft/networking/packets/packet.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
] | null | null | null | PCRC-MCDR/utils/pycraft/networking/packets/packet.py | Dark-Night-Base/MCDP | fbdba3c2b7a919d625067cbd473cdbe779af3256 | [
"MIT"
] | 1 | 2020-04-28T10:25:56.000Z | 2020-04-28T10:25:56.000Z | import copy
from .packet_buffer import PacketBuffer
from zlib import compress
from ..types import (
VarInt, Enum
)
class Packet(object):
packet_name = "base"
id = None
definition = None
data = None # PCRC
# To define the packet ID, either:
# 1. Define the attribute `id', of type int, in a subclass; or
# 2. Override `get_id' in a subclass and return the correct packet ID
# for the given ConnectionContext. This is necessary if the packet ID
# has changed across protocol versions, for example.
@classmethod
def get_id(cls, context):
return cls.id
# To define the network data layout of a packet, either:
# 1. Define the attribute `definition', a list of fields, each of which
# is a dict mapping attribute names to data types; or
# 2. Override `get_definition' in a subclass and return the correct
# definition for the given ConnectionContext. This may be necessary
# if the layout has changed across protocol versions, for example; or
# 3. Override the methods `read' and/or `write_fields' in a subclass.
# This may be necessary if the packet layout cannot be described as a
# simple list of fields.
@classmethod
def get_definition(cls, context):
return cls.definition
def __init__(self, context=None, **kwargs):
self.context = context
self.set_values(**kwargs)
@property
def context(self):
return self._context
@context.setter
def context(self, _context):
self._context = _context
self._context_changed()
def _context_changed(self):
if self._context is not None:
self.id = self.get_id(self._context)
self.definition = self.get_definition(self._context)
else:
self.id = None
self.definition = None
def set_values(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
return self
def read(self, file_object):
for field in self.definition:
for var_name, data_type in field.items():
value = data_type.read_with_context(file_object, self.context)
setattr(self, var_name, value)
# Writes a packet buffer to the socket with the appropriate headers
# and compressing the data if necessary
def _write_buffer(self, socket, packet_buffer, compression_threshold):
# compression_threshold of None means compression is disabled
if compression_threshold is not None:
if len(packet_buffer.get_writable()) > compression_threshold != -1:
# compress the current payload
packet_data = packet_buffer.get_writable()
compressed_data = compress(packet_data)
packet_buffer.reset()
# write out the length of the uncompressed payload
VarInt.send(len(packet_data), packet_buffer)
# write the compressed payload itself
packet_buffer.send(compressed_data)
else:
# write out a 0 to indicate uncompressed data
packet_data = packet_buffer.get_writable()
packet_buffer.reset()
VarInt.send(0, packet_buffer)
packet_buffer.send(packet_data)
VarInt.send(len(packet_buffer.get_writable()), socket) # Packet Size
socket.send(packet_buffer.get_writable()) # Packet Payload
def write(self, socket, compression_threshold=None):
# buffer the data since we need to know the length of each packet's
# payload
packet_buffer = PacketBuffer()
# write packet's id right off the bat in the header
VarInt.send(self.id, packet_buffer)
# write every individual field
self.write_fields(packet_buffer)
self.data = copy.deepcopy(packet_buffer.bytes.getvalue())
self._write_buffer(socket, packet_buffer, compression_threshold)
def write_fields(self, packet_buffer):
# Write the fields comprising the body of the packet (excluding the
# length, packet ID, compression and encryption) into a PacketBuffer.
for field in self.definition:
for var_name, data_type in field.items():
data = getattr(self, var_name)
data_type.send_with_context(data, packet_buffer, self.context)
def __repr__(self):
str = type(self).__name__
if self.id is not None:
str = '0x%02X %s' % (self.id, str)
fields = self.fields
if fields is not None:
inner_str = ', '.join('%s=%s' % (a, self.field_string(a))
for a in fields if hasattr(self, a))
str = '%s(%s)' % (str, inner_str)
return str
@property
def fields(self):
""" An iterable of the names of the packet's fields, or None. """
if self.definition is None:
return None
return (field for defn in self.definition for field in defn)
def field_string(self, field):
""" The string representation of the value of a the given named field
of this packet. Override to customise field value representation.
"""
value = getattr(self, field, None)
enum_class = self.field_enum(field, self.context)
if enum_class is not None:
name = enum_class.name_from_value(value)
if name is not None:
return name
return repr(value)
@classmethod
def field_enum(cls, field, context=None):
""" The subclass of 'pycraft.networking.types.Enum' associated with
this field, or None if there is no such class.
"""
enum_name = ''.join(s.capitalize() for s in field.split('_'))
if hasattr(cls, enum_name):
enum_class = getattr(cls, enum_name)
if isinstance(enum_class, type) and issubclass(enum_class, Enum):
return enum_class
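# Illustrative sketch of a concrete subclass (not part of the original module;
# the packet id and field layout are assumptions for demonstration only):
#
#     class KeepAlivePacket(Packet):
#         packet_name = 'keep alive'
#         id = 0x21
#         definition = [
#             {'keep_alive_id': VarInt}]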
| 38.993548 | 79 | 0.627399 |
4a228507532d3492cb247acb443659a30d0727c0 | 3,873 | py | Python | python/dsbox/template/template_files/loaded/DefaultLinkPredictionTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-05-10T22:19:44.000Z | 2020-07-21T07:28:39.000Z | python/dsbox/template/template_files/loaded/DefaultLinkPredictionTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 187 | 2018-04-13T17:19:24.000Z | 2020-04-21T00:41:15.000Z | python/dsbox/template/template_files/loaded/DefaultLinkPredictionTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-07-10T00:14:07.000Z | 2019-07-25T17:59:44.000Z | from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
class DefaultLinkPredictionTemplate(DSBoxTemplate):
'''
Dummy implementation that does not look at the underlying graph at all.
'''
def __init__(self):
DSBoxTemplate.__init__(self)
self.template = {
"name": "Default_LinkPrediction_Template",
"taskType": {TaskKeyword.LINK_PREDICTION.name},
# for some special condition, the taskSubtype can be "NONE" which indicate no taskSubtype given
"taskSubtype": {TaskKeyword.LINK_PREDICTION.name},
"inputType": {"graph", "edgeList"},
"output": "model_step",
"steps": [
{
"name": "to_dataframe_step",
"primitives": ["d3m.primitives.data_transformation.dataset_to_dataframe.Common"],
"inputs": ["template_input"]
},
{
"name": "common_profiler_step",
"primitives": ["d3m.primitives.schema_discovery.profiler.Common"],
"inputs": ["to_dataframe_step"]
},
{
"name": "extract_attribute_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
"hyperparameters":
{
'semantic_types': ('https://metadata.datadrivendiscovery.org/types/PrimaryKey',
'https://metadata.datadrivendiscovery.org/types/Attribute',),
'use_columns': (),
'exclude_columns': ()
}
}],
"inputs": ["common_profiler_step"]
},
{
"name": "to_numeric_step",
"primitives": ["d3m.primitives.data_transformation.to_numeric.DSBOX"],
"inputs":["extract_attribute_step"],
},
{
"name": "extract_target_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
"hyperparameters":
{
'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TrueTarget',),
'use_columns': (),
'exclude_columns': ()
}
}],
"inputs": ["common_profiler_step"]
},
{
"name": "model_step",
"primitives": [{
"primitive": "d3m.primitives.classification.random_forest.SKlearn",
"hyperparameters": {
# 'bootstrap': ["bootstrap", "disabled"],
'max_depth': [15, 30, None],
'min_samples_leaf': [1, 2, 4],
'min_samples_split': [2, 5, 10],
'max_features': ['auto', 'sqrt'],
'n_estimators': [10, 50, 100],
'add_index_columns': [True],
'use_semantic_types':[True],
}
}
],
"inputs": ["to_numeric_step", "extract_target_step"]
}
]
}
| 46.107143 | 115 | 0.452879 |
4a2285883355115bb20aebf4790fb44fc29493fc | 3,642 | py | Python | SimCalorimetry/EcalTrigPrimProducers/test/readDBTPConds_cfg.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | SimCalorimetry/EcalTrigPrimProducers/test/readDBTPConds_cfg.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | SimCalorimetry/EcalTrigPrimProducers/test/readDBTPConds_cfg.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("TPDBAn")
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("Geometry.CaloEventSetup.CaloGeometry_cfi")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.DBParameters.authenticationPath = '/afs/cern.ch/cms/DB/conddb'
process.ecalTPConditions = cms.ESSource("PoolDBESSource",
process.CondDBSetup,
loadAll = cms.bool(True),
toGet = cms.VPSet(cms.PSet(
record = cms.string('EcalTPGPedestalsRcd'),
tag = cms.string('EcalTPGPedestals_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGLinearizationConstRcd'),
tag = cms.string('EcalTPGLinearizationConst_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGSlidingWindowRcd'),
tag = cms.string('EcalTPGSlidingWindow_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGFineGrainEBIdMapRcd'),
tag = cms.string('EcalTPGFineGrainEBIdMap_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGFineGrainStripEERcd'),
tag = cms.string('EcalTPGFineGrainStripEE_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGFineGrainTowerEERcd'),
tag = cms.string('EcalTPGFineGrainTowerEE_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGLutIdMapRcd'),
tag = cms.string('EcalTPGLutIdMap_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGWeightIdMapRcd'),
tag = cms.string('EcalTPGWeightIdMap_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGWeightGroupRcd'),
tag = cms.string('EcalTPGWeightGroup_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGLutGroupRcd'),
tag = cms.string('EcalTPGLutGroup_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGFineGrainEBGroupRcd'),
tag = cms.string('EcalTPGFineGrainEBGroup_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGPhysicsConstRcd'),
tag = cms.string('EcalTPGPhysicsConst_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGCrystalStatusRcd'),
tag = cms.string('EcalTPGCrystalStatus_v2_hlt')
),
cms.PSet(
record = cms.string('EcalTPGTowerStatusRcd'),
tag = cms.string('EcalTPGTowerStatus_hlt')
)),
messagelevel = cms.untracked.uint32(3),
timetype = cms.string('runnumber'),
connect = cms.string('oracle://cms_orcoff_prod/CMS_COND_34X_ECAL'),
authenticationMethod = cms.untracked.uint32(1),
loadBlobStreamer = cms.untracked.bool(True)
)
process.tpDBAnalyzer = cms.EDAnalyzer("EcalTPCondAnalyzer")
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
EcalTPCondAnalyzer = cms.untracked.PSet(
limit = cms.untracked.int32(100000000)
),
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('DEBUG')
),
debugModules = cms.untracked.vstring('tpDBAnalyzer')
)
process.p = cms.Path(process.tpDBAnalyzer)
| 33.109091 | 83 | 0.636463 |
4a22880585479433cd485b84a5da22deeefaaf82 | 7,209 | py | Python | rq/utils.py | zchcai/rq | 49b156ecc7ab16794f91f962326c2c24ec76f3a5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | rq/utils.py | zchcai/rq | 49b156ecc7ab16794f91f962326c2c24ec76f3a5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | rq/utils.py | zchcai/rq | 49b156ecc7ab16794f91f962326c2c24ec76f3a5 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Miscellaneous helper functions.
The formatter for ANSI colored console output is heavily based on Pygments
terminal colorizing code, originally by Georg Brandl.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import calendar
import datetime
import importlib
import logging
import numbers
import sys
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from .compat import as_text, is_python_version, string_types
from .exceptions import TimeoutFormatError
class _Colorizer(object):
def __init__(self):
esc = "\x1b["
self.codes = {}
self.codes[""] = ""
self.codes["reset"] = esc + "39;49;00m"
self.codes["bold"] = esc + "01m"
self.codes["faint"] = esc + "02m"
self.codes["standout"] = esc + "03m"
self.codes["underline"] = esc + "04m"
self.codes["blink"] = esc + "05m"
self.codes["overline"] = esc + "06m"
dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
x = 30
for d, l in zip(dark_colors, light_colors):
self.codes[d] = esc + "%im" % x
self.codes[l] = esc + "%i;01m" % x
x += 1
del d, l, x
self.codes["darkteal"] = self.codes["turquoise"]
self.codes["darkyellow"] = self.codes["brown"]
self.codes["fuscia"] = self.codes["fuchsia"]
self.codes["white"] = self.codes["bold"]
if hasattr(sys.stdout, "isatty"):
self.notty = not sys.stdout.isatty()
else:
self.notty = True
def reset_color(self):
return self.codes["reset"]
def colorize(self, color_key, text):
if self.notty:
return text
else:
return self.codes[color_key] + text + self.codes["reset"]
colorizer = _Colorizer()
def make_colorizer(color):
"""Creates a function that colorizes text with the given color.
For example:
green = make_colorizer('darkgreen')
red = make_colorizer('red')
Then, you can use:
print "It's either " + green('OK') + ' or ' + red('Oops')
"""
def inner(text):
return colorizer.colorize(color, text)
return inner
class ColorizingStreamHandler(logging.StreamHandler):
levels = {
logging.WARNING: make_colorizer('darkyellow'),
logging.ERROR: make_colorizer('darkred'),
logging.CRITICAL: make_colorizer('darkred'),
}
def __init__(self, exclude=None, *args, **kwargs):
self.exclude = exclude
super(ColorizingStreamHandler, self).__init__(*args, **kwargs)
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def format(self, record):
message = logging.StreamHandler.format(self, record)
if self.is_tty:
colorize = self.levels.get(record.levelno, lambda x: x)
# Don't colorize any traceback
parts = message.split('\n', 1)
parts[0] = " ".join([parts[0].split(" ", 1)[0], colorize(parts[0].split(" ", 1)[1])])
message = '\n'.join(parts)
return message
def import_attribute(name):
"""Return an attribute from a dotted path name (e.g. "path.to.func")."""
module_name, attribute = name.rsplit('.', 1)
module = importlib.import_module(module_name)
return getattr(module, attribute)
def utcnow():
return datetime.datetime.utcnow()
_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def utcformat(dt):
return dt.strftime(as_text(_TIMESTAMP_FORMAT))
def utcparse(string):
try:
return datetime.datetime.strptime(string, _TIMESTAMP_FORMAT)
except ValueError:
# This catches any jobs remain with old datetime format
return datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ')
def first(iterable, default=None, key=None):
"""
Return first element of `iterable` that evaluates true, else return None
(or an optional default value).
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
"""
if key is None:
for el in iterable:
if el:
return el
else:
for el in iterable:
if key(el):
return el
return default
def is_nonstring_iterable(obj):
"""Returns whether the obj is an iterable, but not a string"""
return isinstance(obj, Iterable) and not isinstance(obj, string_types)
def ensure_list(obj):
"""
When passed an iterable of objects, does nothing, otherwise, it returns
a list with just that object in it.
"""
return obj if is_nonstring_iterable(obj) else [obj]
def current_timestamp():
"""Returns current UTC timestamp"""
return calendar.timegm(datetime.datetime.utcnow().utctimetuple())
def enum(name, *sequential, **named):
values = dict(zip(sequential, range(len(sequential))), **named)
# NOTE: Yes, we *really* want to cast using str() here.
# On Python 2 type() requires a byte string (which is str() on Python 2).
# On Python 3 it does not matter, so we'll use str(), which acts as
# a no-op.
return type(str(name), (), values)
def backend_class(holder, default_name, override=None):
"""Get a backend class using its default attribute name or an override"""
if override is None:
return getattr(holder, default_name)
elif isinstance(override, string_types):
return import_attribute(override)
else:
return override
def str_to_date(date_str):
if not date_str:
return
else:
return utcparse(date_str.decode())
def parse_timeout(timeout):
"""Transfer all kinds of timeout format to an integer representing seconds"""
if not isinstance(timeout, numbers.Integral) and timeout is not None:
try:
timeout = int(timeout)
except ValueError:
digit, unit = timeout[:-1], (timeout[-1:]).lower()
unit_second = {'d': 86400, 'h': 3600, 'm': 60, 's': 1}
try:
timeout = int(digit) * unit_second[unit]
except (ValueError, KeyError):
raise TimeoutFormatError('Timeout must be an integer or a string representing an integer, or '
'a string with format: digits + unit, unit can be "d", "h", "m", "s", '
'such as "1h", "23m".')
return timeout
| 28.721116 | 112 | 0.59939 |
4a2288ada92a7fe9d268898fc597c03f44b1752f | 363 | py | Python | MBAR_PyTorch/__init__.py | xqding/MBAR_PyTorch | b1daa07bddd98953777482c4fcff5d98e782f164 | [
"MIT"
] | 1 | 2018-08-09T16:39:32.000Z | 2018-08-09T16:39:32.000Z | MBAR_PyTorch/__init__.py | xqding/MBAR_PyTorch | b1daa07bddd98953777482c4fcff5d98e782f164 | [
"MIT"
] | null | null | null | MBAR_PyTorch/__init__.py | xqding/MBAR_PyTorch | b1daa07bddd98953777482c4fcff5d98e782f164 | [
"MIT"
] | null | null | null | """
MBAR_PyTorch is an implementation of the multistate Bennette acceprance ratio
(MBAR) [1] method using the PyTorch [2] library. Comparing with the package
pymbar [3], MBAR_PyTorch is faster when calculating free energyies for a large
num of states with a large num of conformations.
"""
from MBAR_PyTorch.MBAR import MBAR
from MBAR_PyTorch.MBAR import test
| 40.333333 | 78 | 0.793388 |
4a228902c106e63f0279ee4abaf73f8d18ff9b0e | 7,419 | py | Python | src/sage/categories/coxeter_group_algebras.py | drvinceknight/sage | 00199fb220aa173d8585b9e90654dafd3247d82d | [
"BSL-1.0"
] | 2 | 2015-08-11T05:05:47.000Z | 2019-05-15T17:27:25.000Z | src/sage/categories/coxeter_group_algebras.py | kaushik94/sage | 00199fb220aa173d8585b9e90654dafd3247d82d | [
"BSL-1.0"
] | null | null | null | src/sage/categories/coxeter_group_algebras.py | kaushik94/sage | 00199fb220aa173d8585b9e90654dafd3247d82d | [
"BSL-1.0"
] | 1 | 2020-07-24T11:56:55.000Z | 2020-07-24T11:56:55.000Z | import functools
from sage.misc.cachefunc import cached_method
from sage.categories.algebra_functor import AlgebrasCategory
class CoxeterGroupAlgebras(AlgebrasCategory):
class ParentMethods:
def demazure_lusztig_operator_on_basis(self, w, i, q1, q2, side="right"):
r"""
Return the result of applying the `i`-th Demazure Lusztig operator on ``w``.
INPUT:
- ``w`` -- an element of the Coxeter group
- ``i`` -- an element of the index set
- ``q1,q2`` -- two elements of the ground ring
- ``bar`` -- a boolean (default False)
See :meth:`demazure_lusztig_operators` for details.
EXAMPLES::
sage: W = WeylGroup(["B",3])
sage: W.element_class._repr_=lambda x: "".join(str(i) for i in x.reduced_word())
sage: K = QQ['q1,q2']
sage: q1, q2 = K.gens()
sage: KW = W.algebra(K)
sage: w = W.an_element()
sage: KW.demazure_lusztig_operator_on_basis(w, 0, q1, q2)
(-q2)*B[323123] + (q1+q2)*B[123]
sage: KW.demazure_lusztig_operator_on_basis(w, 1, q1, q2)
q1*B[1231]
sage: KW.demazure_lusztig_operator_on_basis(w, 2, q1, q2)
q1*B[1232]
sage: KW.demazure_lusztig_operator_on_basis(w, 3, q1, q2)
(q1+q2)*B[123] + (-q2)*B[12]
At `q_1=1` and `q_2=0` we recover the action of the isobaric divided differences `\pi_i`::
sage: KW.demazure_lusztig_operator_on_basis(w, 0, 1, 0)
B[123]
sage: KW.demazure_lusztig_operator_on_basis(w, 1, 1, 0)
B[1231]
sage: KW.demazure_lusztig_operator_on_basis(w, 2, 1, 0)
B[1232]
sage: KW.demazure_lusztig_operator_on_basis(w, 3, 1, 0)
B[123]
At `q_1=1` and `q_2=-1` we recover the action of the simple reflection `s_i`::
sage: KW.demazure_lusztig_operator_on_basis(w, 0, 1, -1)
B[323123]
sage: KW.demazure_lusztig_operator_on_basis(w, 1, 1, -1)
B[1231]
sage: KW.demazure_lusztig_operator_on_basis(w, 2, 1, -1)
B[1232]
sage: KW.demazure_lusztig_operator_on_basis(w, 3, 1, -1)
B[12]
"""
return (q1+q2) * self.monomial(w.apply_simple_projection(i,side=side)) - self.term(w.apply_simple_reflection(i, side=side), q2)
def demazure_lusztig_operators(self, q1, q2, side="right", affine=True):
r"""
Return the Demazure Lusztig operators acting on ``self``.
INPUT:
- ``q1,q2`` -- two elements of the ground ring `\KK`
- ``side`` -- "left" or "right" (default: "right"): which side to act upon
- ``affine`` -- a boolean (default: True)
The Demazure-Lusztig operator `T_i` is the linear map
`R\rightarrow R` obtained by interpolating between the
simple projection `\pi_i` (see
:meth:`CoxeterGroups.ElementMethods.simple_projection`)
and the simple reflection `s_i` so that `T_i` has
eigenvalues `q_1` and `q_2`.
.. MATH::
(q_1+q_2) \pi_i -q_2 s_i
The Demazure-Lusztig operators give the usual
representation of the operators `T_i` of the `q_1,q_2`
Hecke algebra associated to the Coxeter group.
For a finite Coxeter group, and if ``affine=True``, the
Demazure-Lusztig operators `T_1,\dots,T_n` are completed
by `T_0` to implement the level `0` action of the affine
Hecke algebra.
EXAMPLES::
sage: W = WeylGroup(["B",3])
sage: W.element_class._repr_=lambda x: "".join(str(i) for i in x.reduced_word())
sage: K = QQ['q1,q2']
sage: q1, q2 = K.gens()
sage: KW = W.algebra(K)
sage: T = KW.demazure_lusztig_operators(q1, q2, affine=True)
sage: x = KW.monomial(W.an_element()); x
B[123]
sage: T[0](x)
(-q2)*B[323123] + (q1+q2)*B[123]
sage: T[1](x)
q1*B[1231]
sage: T[2](x)
q1*B[1232]
sage: T[3](x)
(q1+q2)*B[123] + (-q2)*B[12]
sage: T._test_relations()
.. NOTE::
For a finite Weyl group `W`, the level 0 action of the
affine Weyl group `\tilde W` only depends on the
Coxeter diagram of the affinization, not its Dynkin
diagram. Hence it is possible to explore all cases
using only untwisted affinizations.
"""
from sage.combinat.root_system.hecke_algebra_representation import HeckeAlgebraRepresentation
W = self.basis().keys()
cartan_type = W.cartan_type()
if affine and cartan_type.is_finite():
cartan_type = cartan_type.affine()
T_on_basis = functools.partial(self.demazure_lusztig_operator_on_basis, q1=q1, q2=q2, side=side)
return HeckeAlgebraRepresentation(self, T_on_basis, cartan_type, q1, q2)
@cached_method
def demazure_lusztig_eigenvectors(self, q1, q2):
r"""
Return the family of eigenvectors for the Cherednik operators.
INPUT:
- ``self`` -- a finite Coxeter group `W`
- ``q1,q2`` -- two elements of the ground ring `\KK`
The affine Hecke algebra `H_{q_1,q_2}(\tilde W)` acts on
the group algebra of `W` through the Demazure-Lusztig
operators `T_i`. Its Cherednik operators `Y^\lambda` can
be simultaneously diagonalized as long as `q_1/q_2` is not
a small root of unity [HST2008]_.
This method returns the family of joint eigenvectors,
indexed by `W`.
.. SEEALSO::
- :meth:`demazure_lusztig_operators`
- :class:`sage.combinat.root_system.hecke_algebra_representation.CherednikOperatorsEigenvectors`
EXAMPLES::
sage: W = WeylGroup(["B",2])
sage: W.element_class._repr_=lambda x: "".join(str(i) for i in x.reduced_word())
sage: K = QQ['q1,q2'].fraction_field()
sage: q1, q2 = K.gens()
sage: KW = W.algebra(K)
sage: E = KW.demazure_lusztig_eigenvectors(q1,q2)
sage: E.keys()
Weyl Group of type ['B', 2] (as a matrix group acting on the ambient space)
sage: w = W.an_element()
sage: E[w]
(q2/(-q1+q2))*B[2121] + ((-q2)/(-q1+q2))*B[121] - B[212] + B[12]
"""
W = self.basis().keys()
if not W.cartan_type().is_finite():
raise ValueError("The Demazure-Lusztig eigenvectors are only defined for finite Coxeter groups")
result = self.demazure_lusztig_operators(q1, q2, affine=True).Y_eigenvectors()
w0 = W.long_element()
result.affine_lift = w0._mul_
result.affine_retract = w0._mul_
return result
| 42.153409 | 139 | 0.541852 |
4a22891b676f79a0c1a17eaf6741f028ec6bd2d1 | 7,887 | py | Python | examples/pwr_run/checkpointing/timed/max_par/job34.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/timed/max_par/job34.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/timed/max_par/job34.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
Trains a ResNet on the CIFAR-10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.0006
args_model = 'resnet50'
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_param/' + job_name + '*'
total_epochs = 50
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_param/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
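# With the handler registered, sending "kill -15 <pid>" to this process checkpoints the
# model to /scratch/li.baol/checkpoint_max_param/<job_name>_<epoch>.h5, marks the job in
# checkpoint.json, and exits.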
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
def on_epoch_end(self, epoch, logs=None):
open('epoch/' + job_name + '.txt', 'a').close()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
if not args.resume:
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
param_dict = {}
modify = False
with open('param_lock.json', 'r') as fp:
param_dict = json.load(fp)
if job_name not in param_dict:
param_dict[job_name] = trainable_count
modify = True
elif param_dict[job_name] != trainable_count:
param_dict[job_name] = trainable_count
modify = True
if modify:
json_file = json.dumps(param_dict)
with open('param_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('param_lock.json', 'param.json')
ckpt_qual_dict = {}
while True:
if os.path.exists('ckpt_qual.json'):
os.rename('ckpt_qual.json', 'ckpt_qual_lock.json')
break
else:
time.sleep(1)
with open('ckpt_qual_lock.json', 'r') as fp:
ckpt_qual_dict = json.load(fp)
ckpt_qual_dict[job_name] = 1
json_file2 = json.dumps(ckpt_qual_dict)
with open('ckpt_qual_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('ckpt_qual_lock.json', 'ckpt_qual.json')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
os.rename('finish.json', 'finish_lock.json')
break
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
| 31.674699 | 118 | 0.690884 |
4a228a4b1f7b81eb4c304ab4fb2e21abfacdf562 | 97 | py | Python | sport.py | liujanice/SpORT | 48daa47c89da07a5328f029e9f1fe82f294a7d3b | [
"BSD-3-Clause"
] | null | null | null | sport.py | liujanice/SpORT | 48daa47c89da07a5328f029e9f1fe82f294a7d3b | [
"BSD-3-Clause"
] | null | null | null | sport.py | liujanice/SpORT | 48daa47c89da07a5328f029e9f1fe82f294a7d3b | [
"BSD-3-Clause"
] | null | null | null | import pyglet
song = pyglet.media.load('GroceryScanningSound.wav')
song.play()
pyglet.app.run()
| 16.166667 | 52 | 0.762887 |
4a228acbf843f44b26fdd52ec021dc496f536e58 | 19,661 | py | Python | code/python/lib/mg_parallelization/pbs.py | alguru/metagenemark-2 | 3389a6bb306acea87ab3ccb63c47281aadafb273 | [
"MIT"
] | null | null | null | code/python/lib/mg_parallelization/pbs.py | alguru/metagenemark-2 | 3389a6bb306acea87ab3ccb63c47281aadafb273 | [
"MIT"
] | 1 | 2022-03-30T10:18:15.000Z | 2022-03-30T10:18:15.000Z | code/python/lib/mg_parallelization/pbs.py | gatech-genemark/MetaGeneMark-2-exp | 99f8cf091911b9200af97e722543ad84a247770f | [
"MIT"
] | 1 | 2021-04-06T12:43:12.000Z | 2021-04-06T12:43:12.000Z | # Author: karl
# Created: 2020-06-21, 9:34 a.m.
import os
import copy
import time
import logging
import numpy as np
from typing import *
from mg_general import Environment
from mg_io.general import mkdir_p, write_to_file, remove_p
from mg_general.general import get_value, run_shell_cmd
from mg_options.parallelization import ParallelizationOptions
from mg_parallelization.pbs_job_package import PBSJobPackage
log = logging.getLogger(__name__)
class FunctionArguments:
def __init__(self, **kwargs):
self._kwargs = kwargs
def get_arguments(self, data):
# type: (Dict[str, Any]) -> Dict[str, Any]
new_kwargs = copy.deepcopy(self._kwargs)
new_kwargs.update(data)
return new_kwargs
class PBS:
"""Runs any function on input data using PBS scheduler"""
def __init__(self, env, prl_options, splitter, merger, **kwargs):
# type: (Environment, ParallelizationOptions, Callable, Callable, Dict[str, Any]) -> None
"""Create a PBS instance that can run a function on
:param env: Environment
:param prl_options: Parallelization options that contains PBS configuration
:param splitter: How to split input data
:param merger: How to merge back all PBS job outputs
:param kwargs: Other arguments
"""
self._rng = np.random.RandomState(int(time.time()))
self._dry_run = get_value(kwargs, "dry_run", False)
self._env = env
if prl_options is None:
raise ValueError("prl_options cannot be None")
self._prl_options = copy.deepcopy(prl_options)
self._splitter = splitter
self._merger = merger
def _setup_pbs_run(self):
mkdir_p(self._prl_options["pbs-pd-head"])
def run(self, data, func, func_kwargs, **kwargs):
# type: (Dict[str, Any], Callable, Dict[str, Any], Dict[str, Any]) -> Any
"""
Run function on data using PBS scheduler
:param data: Dictionary containing data name and value
:param func: function to call on data with arguments in func_kwargs
:param func_kwargs: Additional arguments to be passed
:param kwargs:
:return: output of merger function
"""
job_name = get_value(kwargs, "job_name", "JOBNAME")
pf_input_package_template_formatted = os.path.join(
os.path.abspath(self._prl_options["pbs-pd-head"]), "input_package_{}"
)
num_jobs = min(self._prl_options["pbs-jobs"], len(data))
# 1) Parse all PBS arguments
self._setup_pbs_run()
# 2) Create input packages files, one for every PBS run
list_pf_input_job = self.create_input_package_files(
data, func, func_kwargs, num_jobs,
pf_package_template_formatted=pf_input_package_template_formatted,
**kwargs
)
num_jobs = len(list_pf_input_job)
# 3) Run all
list_pf_output_job_packages = self.execute_function_on_input_packages(
pf_input_package_template_formatted,
job_name=job_name, num_jobs=num_jobs
)
# 4) Merge end-results
data_output = None
if not self._dry_run:
data_output = self.merge_output_package_files(list_pf_output_job_packages)
# 5) Clean
#if self._prl_options.safe_get("pbs-clean"):
#remove_p(*list_pf_input_job)
# remove_p(*list_pf_output_job_packages)
return data_output
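    # Example (hypothetical usage sketch; the splitter, merger and worker function below
    # are illustrative, not part of this module):
    #
    #   pbs = PBS(env, prl_options, splitter=my_splitter, merger=my_merger)
    #   result = pbs.run(data, func=my_worker, func_kwargs={"env": env}, job_name="my_job")
    #
    # where my_splitter(data, n) returns n dictionaries of keyword arguments for
    # my_worker, and my_merger combines the per-job outputs into the final result.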
def run_on_generator(self, gen_data, func, func_kwargs, **kwargs):
# type: (Generator, Callable, Dict[str, Any], Dict[str, Any]) -> Any
"""
Run function on data using PBS scheduler
        :param gen_data: generator yielding the data items to be processed
:param func: function to call on data with arguments in func_kwargs
:param func_kwargs: Additional arguments to be passed
:param kwargs:
:return: output of merger function
"""
job_name = get_value(kwargs, "job_name", "JOBNAME")
pf_input_package_template_formatted = os.path.join(
os.path.abspath(self._prl_options["pbs-pd-head"]), "input_package_{}"
)
num_jobs = self._prl_options["pbs-jobs"]
# 1) Parse all PBS arguments
self._setup_pbs_run()
# 2) Create input packages files, one for every PBS run
list_pf_input_job = self.create_input_package_files_from_generator(
gen_data, func, func_kwargs, num_jobs,
pf_package_template_formatted=pf_input_package_template_formatted,
**kwargs
)
num_jobs = len(list_pf_input_job)
# 3) Run all
list_pf_output_job_packages = self.execute_function_on_input_packages(
pf_input_package_template_formatted,
job_name=job_name, num_jobs=num_jobs
)
merge_kwargs = get_value(kwargs, "merge_kwargs", dict())
# 4) Merge end-results
data_output = None
if not self._dry_run:
data_output = self.merge_output_package_files(list_pf_output_job_packages, as_generator=True,
**merge_kwargs)
# 5) Clean
#if self._prl_options.safe_get("pbs-clean"):
#remove_p(*[f"{x}.pkl" for x in list_pf_input_job])
# remove_p(*[f"{x}.pkl" for x in list_pf_output_job_packages])
return data_output
def create_input_package_files(self, data, func, func_kwargs, num_splits, **kwargs):
# type: (Dict, Callable, Dict[str, Any], int, Dict[str, Any]) -> List[str]
"""
        Split the data and create one input package file per PBS job
:param data: the entire data
:type data: DataHandler.D
:param func: the function to execute on the (split) data
:type func: Callable
:param func_kwargs: the remaining arguments (i.e. not data) to be passed to the function
:type func_kwargs: Dict[str, Any]
:param num_splits: number of job splits
:type num_splits: int
:param kwargs:
:return: List of paths to input package files
:rtype: List[str]
"""
split_collector = get_value(kwargs, "split_collector", None, valid_type=list) # type: List
split_kwargs = get_value(kwargs, "split_kwargs", None)
if split_kwargs is None:
split_kwargs = dict()
pd_work_pbs = self._prl_options["pbs-pd-head"]
pf_package_template_formatted = get_value(
kwargs, "pf_package_template_formatted", os.path.join(pd_work_pbs, "input_package_{}")
)
# Split data
list_split_data = self._splitter(data, num_splits, **split_kwargs)
# collect split data if requested
if split_collector is not None:
for d in list_split_data:
split_collector.append(d)
# Write package to disk
list_pf_data = self._package_and_save_list_data(list_split_data, func, func_kwargs,
pf_package_template_formatted)
# return list of filenames
return list_pf_data
def create_input_package_files_from_generator(self, data, func, func_kwargs, num_splits, **kwargs):
        # type: (Generator, Callable, Dict[str, Any], int, Dict[str, Any]) -> List[str]
"""
        Create input package files, one per job, from a generator of data
        :param data: a generator yielding the data to be packaged, one item per job
        :type data: Generator
:param func: the function to execute on the (split) data
:type func: Callable
:param func_kwargs: the remaining arguments (i.e. not data) to be passed to the function
:type func_kwargs: Dict[str, Any]
:param num_splits: number of job splits
:type num_splits: int
:param kwargs:
:return: List of paths to input package files
:rtype: List[str]
"""
split_collector = get_value(kwargs, "split_collector", None, valid_type=list) # type: List
split_kwargs = get_value(kwargs, "split_kwargs", None)
if split_kwargs is None:
split_kwargs = dict()
pd_work_pbs = self._prl_options["pbs-pd-head"]
pf_package_template_formatted = get_value(
kwargs, "pf_package_template_formatted", os.path.join(pd_work_pbs, "input_package_{}")
)
# Split data
# list_split_data = self._splitter(data, num_splits, **split_kwargs)
# Write package to disk
list_pf_data = self._package_and_save_list_data_from_generator(
data, func, func_kwargs, pf_package_template_formatted, **split_kwargs
)
# return list of filenames
return list_pf_data
def execute_function_on_input_packages(self, pf_input_package_template_formatted, job_name, num_jobs):
# type: (str, str, int) -> List[str]
"""
        Create the PBS file for the run and execute it, returning the paths to all the job output packages
:param pf_input_package_template_formatted:
:param job_name:
:param num_jobs:
:returns: list of paths to output file packages
        :rtype: List[str]
"""
pd_head = self._prl_options["pbs-pd-head"]
pf_pbs = os.path.join(self._prl_options["pbs-pd-head"], "run.pbs")
pf_input_package_template = pf_input_package_template_formatted.format("${PBS_ARRAYID}")
# create pbs file
pf_output_package_template = "{}_output".format(pf_input_package_template)
self._create_pbs_file(job_name, num_jobs, pf_pbs, pf_input_package_template, pf_output_package_template)
# run
if not self._dry_run:
array_job_name = PBS._qsub(pf_pbs)
# wait for jobs to end
self._wait_for_job_array(array_job_name, pd_head)
# collect all output files
list_pf_outputs = []
for x in range(1, num_jobs + 1):
if os.path.isfile(PBS.create_concrete_from_template(pf_output_package_template + ".pkl", x)):
list_pf_outputs.append(PBS.create_concrete_from_template(pf_output_package_template, x))
# write summary file
pf_pbs_summary = os.path.join(self._prl_options["pbs-pd-head"], self._prl_options["pbs-fn-summary"])
write_to_file("\n".join(list_pf_outputs), pf_pbs_summary)
return list_pf_outputs
@staticmethod
def _qsub(pf_pbs):
# type: (str) -> str
return run_shell_cmd("qsub -V " + pf_pbs, do_not_log=True).strip()
def _read_data_from_output_packages(self, list_pf_output_packages, as_generator=False):
# if not as_generator:
# list_data = list()
# for pf_output_package in list_pf_output_packages:
# list_data.append(PBSJobPackage.load(pf_output_package)["data"])
# return list_data
# else:
for pf_output_package in list_pf_output_packages:
yield PBSJobPackage.load(pf_output_package)["data"]
# remove_p(pf_output_package)
def merge_output_package_files(self, list_pf_output_packages, **kwargs):
as_generator = get_value(kwargs, "as_generator", False)
list_output_data = self._read_data_from_output_packages(list_pf_output_packages, as_generator)
if not as_generator:
list_output_data = [x for x in list_output_data]
# 4-a) Merge data while loading packages one by one
data_output = self._merger(list_output_data, **kwargs)
return data_output
def _package_and_save_data(self, data, func, func_kwargs, pf_package):
# type: (Dict[str, Any], Callable, Dict[str, Any], str) -> None
complete_func_kwargs = FunctionArguments(**func_kwargs).get_arguments(data)
PBSJobPackage.save(
{
"func": func,
"func_kwargs": complete_func_kwargs
},
pf_package
)
def _package_and_save_list_data(self, list_data, func, func_kwargs, pf_package_template_formatted):
# type: (List[Dict[str, Any]], Callable, Dict[str, Any], str) -> List[str]
list_pf = list()
file_number = 1
for data in list_data:
pf_save = pf_package_template_formatted.format(file_number)
self._package_and_save_data(data, func, func_kwargs, pf_save)
list_pf.append(pf_save)
file_number += 1
return list_pf
def _package_and_save_list_data_from_generator(self, gen_data, func, func_kwargs, pf_package_template_formatted,
**kwargs):
# type: (Generator, Callable, Dict[str, Any], str, Dict[str, Any]) -> List[str]
arg_name_data = get_value(kwargs, "arg_name_data", "data")
list_pf = list()
file_number = 1
for data in gen_data:
pf_save = pf_package_template_formatted.format(file_number)
self._package_and_save_data({arg_name_data: data}, func, func_kwargs, pf_save)
list_pf.append(pf_save)
file_number += 1
return list_pf
def _create_pbs_file(self, jobname, num_jobs, pf_pbs, pf_input_package_template, pf_output_package_template):
"""
        Create the PBS file for running all input jobs
:param jobname: Name of job
:param num_jobs:
:param pf_pbs:
:param pf_input_package_template:
:return:
"""
# create unique compute directory
pd_compute = None # run_shell_cmd("mktemp --tmpdir={}".format(self._prl_options["pbs-pd-root-compute"]))
pbs_text = PBS._generate_pbs_header_array(num_jobs, jobname, self._prl_options, pd_compute=pd_compute)
pbs_text += "\n{}\n".format(
PBS._generate_call_command(self._env,
pf_input_package_template,
pf_output_package_template,
self._prl_options,
pd_compute=pd_compute
)
)
# write to file
write_to_file(pbs_text, pf_pbs)
@staticmethod
def _generate_call_command(env, pf_job_input, pf_job_output, prl_options, pd_compute):
pd_compute = os.path.abspath(os.path.join(prl_options["pbs-pd-root-compute"], prl_options["pbs-dn-compute"]))
pd_job_template = os.path.join(pd_compute, "job_${PBS_ARRAYID}")
cmd = "{} --pf-job-input {} --pf-job-output {} --pd-work {} -l {}".format(
"python {}".format(os.path.join(env["pd-code"], "python/driver", "run-pbs-job.py")),
pf_job_input,
pf_job_output,
pd_job_template,
"DEBUG" # log.level
)
return cmd
@staticmethod
def create_concrete_from_template(pf_template, file_number):
"""Create a concrete file name based on template and file number
e.g. Calling the function with filename_${PBS_ARRAYID}.txt, 5 returns
filename_5.txt
:param pf_template: template of file
:type pf_template: str
:param file_number: the file's number
:returns: a concrete filename
"""
return pf_template.replace("${PBS_ARRAYID}", str(file_number))
def _wait_for_job_array(self, array_jobname, pd_work):
# type: (str, str) -> None
import string
def _create_dummy_pbs_file(pf_dummy, jobname_dummy, pd_work):
# type: (str, str, str) -> None
pbs_text = PBS.generate_pbs_header(jobname_dummy, pd_work, 1, 1, "00:00:01")
write_to_file(pbs_text, pf_dummy)
def _cmd_run_dummy_and_wait(pf_dummy, jobname_dummy, jobname_array):
cmd = "qsub -W depend=afteranyarray:{} {} \n".format(
jobname_array,
pf_dummy
)
cmd += r'while [ $(qstat -a | grep " R\|Q\|H " | grep ' + jobname_dummy + \
r' | wc -l) != 0 ]; do sleep 60 ; done'
return cmd
# generate a random filename for the dummy job
fn_dummy = ''.join(self._rng.choice(list(string.ascii_lowercase)) for _ in range(10))
pf_dummy = os.path.join(pd_work, fn_dummy)
# create an dummy pbs job that waits for the array to finish
_create_dummy_pbs_file(pf_dummy, fn_dummy, pd_work)
# generate pbs command to wait for job-array to finish and then run this dummy job
cmd = _cmd_run_dummy_and_wait(pf_dummy, fn_dummy, array_jobname)
# run command that waits
run_shell_cmd(cmd, do_not_log=True)
@staticmethod
def generate_pbs_header(job_name, working_dir=".", num_nodes=1, ppn=1, walltime="00:30:00"):
pbs_text = ""
pbs_text += "#PBS -N " + str(job_name) + "\n"
pbs_text += "#PBS -o " + str(working_dir) + "\n"
pbs_text += "#PBS -j oe" + "\n"
pbs_text += "#PBS -l nodes=" + str(num_nodes) + ":ppn=" + str(ppn) + "\n"
pbs_text += "#PBS -l walltime=" + str(walltime) + "\n"
pbs_text += "#PBS -W umask=002" + "\n"
pbs_text += "set PBS_O_WORKDIR = " + str(working_dir) + "\n"
pbs_text += "cd $PBS_O_WORKDIR \n"
pbs_text += "echo The working directory is `echo $PBS_O_WORKDIR`" + "\n"
pbs_text += "echo This job runs on the following nodes:" + "\n"
pbs_text += "echo `cat $PBS_NODEFILE`" + "\n"
return pbs_text
@staticmethod
def _generate_pbs_header_array(num_jobs, job_name, prl_options, pd_compute):
"""
:param num_jobs:
:param job_name:
:param prl_options:
:type prl_options: ParallelizationOptions
:return:
"""
num_nodes = prl_options["pbs-nodes"]
ppn = prl_options["pbs-ppn"]
walltime = prl_options["pbs-walltime"]
pd_compute = os.path.abspath(os.path.join(prl_options["pbs-pd-root-compute"], prl_options["pbs-dn-compute"]))
pd_job_template = os.path.join(pd_compute, "job_${PBS_ARRAYID}")
pd_pbs_logs = os.path.join(prl_options["pbs-pd-head"], "pbs_logs")
mkdir_p(pd_pbs_logs)
node_property = prl_options.safe_get("pbs-node-property")
if node_property is not None:
node_property = ":" + node_property
else:
node_property = ""
pbs_text = ""
pbs_text += "#PBS -N " + str(job_name) + "\n"
pbs_text += "#PBS -o " + "{}/{}".format(pd_pbs_logs, "error_${PBS_ARRAYID}") + "\n"
pbs_text += "#PBS -j oe" + "\n"
pbs_text += "#PBS -l nodes=" + str(num_nodes) + ":ppn=" + str(ppn) + "{}\n".format(node_property)
pbs_text += "#PBS -l walltime=" + str(walltime) + "\n"
if prl_options:
array_param = "1-{}".format(num_jobs)
if prl_options["pbs-concurrent-nodes"]:
total_concurrent_jobs = prl_options["pbs-concurrent-nodes"] * int(8 / ppn)
array_param = "{}%{}".format(array_param, total_concurrent_jobs)
pbs_text += "#PBS -t {}".format(array_param) + "\n"
pbs_text += "#PBS -W umask=002" + "\n"
#pbs_text += "export PATH=\"/home/karl/anaconda/envs/biogem_sbsp/bin:$PATH\"\n"
pbs_text += "mkdir -p {}".format(pd_job_template) + "\n"
pbs_text += "PBS_O_WORKDIR=" + pd_job_template + "\n"
pbs_text += "cd $PBS_O_WORKDIR \n"
pbs_text += "sleep 10\n"
pbs_text += "echo The working directory is `echo $PBS_O_WORKDIR`" + "\n"
pbs_text += "echo This job runs on the following nodes:" + "\n"
pbs_text += "echo `cat $PBS_NODEFILE`" + "\n"
return pbs_text
| 36.274908 | 117 | 0.621738 |
4a228bf50b7c9d0bb7afcfc76fa49e036d7205b6 | 27,693 | py | Python | torc/torc.py | ispielma/torc | b1374602ccfa9aff1df97136fecdc5327799866b | [
"BSD-2-Clause"
] | null | null | null | torc/torc.py | ispielma/torc | b1374602ccfa9aff1df97136fecdc5327799866b | [
"BSD-2-Clause"
] | 1 | 2021-06-30T05:30:27.000Z | 2021-06-30T05:30:27.000Z | torc/torc.py | ispielma/torc | b1374602ccfa9aff1df97136fecdc5327799866b | [
"BSD-2-Clause"
] | 1 | 2021-06-23T22:09:51.000Z | 2021-06-23T22:09:51.000Z | import numpy as np
from scipy.special import ellipk, ellipe
from scipy.constants import mu_0
pi = np.pi
import torc
DEFAULT_ARC_SEGS = 12
DEFAULT_CROSS_SEC_SEGS = 12
def _formatobj(obj, *attrnames):
"""Format an object and some attributes for printing"""
attrs = ", ".join(f"{name}={getattr(obj, name, None)}" for name in attrnames)
return f"<{obj.__class__.__name__}({attrs}) at {hex(id(obj))}>"
def _get_factors(n):
"""return all the factors of n"""
factors = set()
for i in range(1, int(n ** (0.5)) + 1):
if not n % i:
factors.update((i, n // i))
return factors
def _segments(x_min, x_max, y_min, y_max, N_segments):
"""Find the optimal cartesian grid for splitting up a rectangle of spanning x_min to
x_max and y_min to y_max into N_segments equal sized segments such that each segment
is as close to square as possible. This is the same as minimising the surface area
between segments. Return a list of the midpoints of each segment"""
size_x = x_max - x_min
size_y = y_max - y_min
lowest_surface_area = None
for n_x in _get_factors(N_segments):
n_y = N_segments // n_x
surface_area = n_x * size_y + n_y * size_x
if lowest_surface_area is None or surface_area < lowest_surface_area:
lowest_surface_area = surface_area
best_n_x, best_n_y = n_x, n_y
dx = size_x / best_n_x
dy = size_y / best_n_y
midpoints = []
for x in np.linspace(x_min + dx / 2, x_max - dx / 2, best_n_x):
for y in np.linspace(y_min + dy / 2, y_max - dy / 2, best_n_y):
midpoints.append((x, y))
return midpoints
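# For example (following the logic above), _segments(0, 4, 0, 1, 4) tiles the 4 x 1
# rectangle with four 1 x 1 squares and returns their midpoints:
# [(0.5, 0.5), (1.5, 0.5), (2.5, 0.5), (3.5, 0.5)].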
def _rectangular_tube(x0, x1, y0, y1, z0, z1, nz=2, bevel=0.075):
"""Create 3 2D arrays x, y, z for the points on the surface of a tube with
rectangular cross section. x0, x1, y0 and y1 are the transverse extent of the
tube, z0 and z1 describe its longitudinal extent. nz may be specified, this is how
many points will be created along the z direction. Although this is not necessary
to describe a straight tube, a curved tube can be made by transforming the
returned points, in which case more than 2 points is necessary for a smooth
result. Bevel may be given, this is the fraction of the shorter side of the cross
section that will be chopped off the corners of the cross section to create a 45
degree bevel on each corner."""
b = bevel * min((y1 - y0), (x1 - x0))
# Four sides plus bevels plus duplicate final point to close the path
n_transverse = 9
# The shape of the cross section, with bevels:
y = np.array([y1 - b, y1, y1, y1 - b, y0 + b, y0, y0, y0 + b, y1 - b])
x = np.array([x0, x0 + b, x1 - b, x1, x1, x1 - b, x0 + b, x0, x0])
z = np.linspace(z0, z1, nz)
# Broadcasting
z = np.broadcast_to(z[:, np.newaxis], (nz, n_transverse))
x = np.broadcast_to(x, (nz, n_transverse))
y = np.broadcast_to(y, (nz, n_transverse))
return x, y, z
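# Each of the returned x, y, z arrays has shape (nz, 9): four sides plus four bevel
# faces, with the first transverse point duplicated at the end to close the path.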
def _broadcast(r):
"""If r=(x, y, z) is a tuple or list of arrays or scalars, broadcast it to be a
single array with the list/tuple index corresponding to the first dimension."""
if not isinstance(r, np.ndarray):
return np.array(np.broadcast_arrays(*r))
return r
def field_of_current_loop(r, z, R, I):
"""Compute, in cylindrical coordinates, Br(r, z), Bz(r, z) of a current loop with
current I and radius R, centred at the origin with normal vector pointing in the z
direction"""
k2 = 4 * r * R / (z ** 2 + (R + r) ** 2)
E_k2 = ellipe(k2)
K_k2 = ellipk(k2)
rprime2 = z ** 2 + (r - R) ** 2
B_r_num = mu_0 * z * I * ((R ** 2 + z ** 2 + r ** 2) / rprime2 * E_k2 - K_k2)
B_r_denom = 2 * pi * r * np.sqrt(z ** 2 + (R + r) ** 2)
# Some hoop jumping to set B_r = 0 when r = 0 despite the expression having a
# division by zero in it in when r = 0:
if isinstance(r, np.ndarray):
B_r = np.zeros(B_r_denom.shape)
B_r[r != 0] = B_r_num[r != 0] / B_r_denom[r != 0]
elif r == 0:
B_r = 0.0
else:
B_r = B_r_num / B_r_denom
B_z_num = mu_0 * I * ((R ** 2 - z ** 2 - r ** 2) / rprime2 * E_k2 + K_k2)
B_z_denom = 2 * pi * np.sqrt(z ** 2 + (R + r) ** 2)
B_z = B_z_num / B_z_denom
return B_r, B_z
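# Sanity check: on the axis (r = 0) the expressions above reduce to the textbook result
# B_r = 0, B_z = mu_0 * I * R**2 / (2 * (R**2 + z**2)**1.5).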
def field_of_current_line(r, z, L, I):
"""compute, in cylindrical coordinates, B_phi(r, z) of a current-carrying straight
wire of length L running from the origin to z = L with current flowing in the +z
direction."""
prefactor = mu_0 * I / (4 * pi * r)
term1 = z / np.sqrt(r ** 2 + z ** 2)
term2 = (L - z) / np.sqrt(r ** 2 + (L - z) ** 2)
return prefactor * (term1 + term2)
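# Sanity check: far from the ends of a very long wire (L -> infinity) this tends to the
# familiar B_phi = mu_0 * I / (2 * pi * r) of an infinite straight wire.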
def _cross(a, b):
"""Cross product of a and b. For some reason np.cross is very slow, so here we
are."""
x = a[1] * b[2] - a[2] * b[1]
y = a[2] * b[0] - a[0] * b[2]
z = a[0] * b[1] - a[1] * b[0]
return np.array([x, y, z])
class CurrentObject(object):
def __init__(self, r0, zprime, xprime=None, n_turns=1, name=None):
"""A current-carrying object with a coordinate system centred at position r0 =
(x0, y0, z0), with primary axis pointing along zprime = (zprime_x, zprime_y,
zprime_z) and secondary axis pointing along xprime = (xprime_x, xprime_y,
xprime_z). These two axes define the orientation of a right handed coordinate
system (xprime, yprime, zprime) for the object with respect to the lab
coordinate directions (x, y, z). The two axes do not need to be normalised (they
will be normalised automatically), but must be orthogonal. if xprime is None
(perhaps if the object has rotational symmetry such that it doesn't matter), it
will be chosen randomly. n_turns is an overall multiplier for the current."""
self.r0 = np.array(r0)
self.zprime = np.array(zprime) / np.sqrt(np.dot(zprime, zprime))
if xprime is None:
# A random vector that is orthogonal to zprime:
xprime = _cross(np.random.randn(3), zprime)
self.xprime = np.array(xprime) / np.sqrt(np.dot(xprime, xprime))
if not abs(np.dot(self.xprime, self.zprime)) < 1e-10:
raise ValueError("Primary and secondary axes of object not orthogonal")
self.yprime = _cross(self.zprime, self.xprime)
# Rotation matrix from local frame to lab frame:
self.Q_rot = np.stack([self.xprime, self.yprime, self.zprime], axis=1)
self.n_turns = n_turns
self.name = name
@property
def x(self):
return self.r0[0]
@property
def y(self):
return self.r0[1]
@property
def z(self):
return self.r0[2]
def pos_to_local(self, r):
"""Take a point r = (x, y, z) in the lab frame and return rprime = (xprime,
yprime, zprime) in the local frame of reference of the object."""
r = _broadcast(r)
return np.einsum('ij,j...->i...', self.Q_rot.T, (r.T - self.r0).T)
def pos_to_lab(self, rprime):
"""Take a point rprime = (xprime, yprime, zprime) in the local frame of the
object and return r = (x, y, z) in the lab frame."""
rprime = _broadcast(rprime)
return (np.einsum('ij,j...->i...', self.Q_rot, rprime).T + self.r0).T
def vector_to_local(self, v):
"""Take a vector v = (v_x, v_y, v_z) in the lab frame and return vprime =
(v_xprime, v_yprime, v_zprime) in the local frame of reference of the object.
This is different to transforming coordinates as it only rotates the vector, it
does not translate it."""
v = _broadcast(v)
return np.einsum('ij,j...->i...', self.Q_rot.T, v)
def vector_to_lab(self, vprime):
"""Take a vector vprime=(v_xprime, v_yprime, v_zprime) in the local frame of the
object and return v = (v_x, v_y, v_z) in the lab frame. This is different to
transforming coordinates as it only rotates the vector, it does not translate
it."""
vprime = _broadcast(vprime)
return np.einsum('ij,j...->i...', self.Q_rot, vprime)
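    # Note: since Q_rot is orthogonal, the *_to_local and *_to_lab methods are inverses
    # of each other, e.g. pos_to_lab(pos_to_local(r)) recovers r (up to float rounding).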
def B(self, r, I):
"""Return the magnetic field at position r=(x, y, z)"""
# r = _broadcast(r)
rprime = self.pos_to_local(r)
return self.vector_to_lab(self.B_local(rprime, I * self.n_turns))
def B_local(self, rprime, I):
return np.zeros_like(rprime)
def dB(self, r, I, s, ds=10e-6):
"""Return a magnetic field derivative at position r=(x, y, z) for a given
current. The derivative returned is that of the field vector in the direction s,
which can be 'x', 'y', 'z', or an arbitrary vector whose direction will be used
(magnitude ignored). Step size ds for numerical differentiation can be given,
otherwise defaults to 10um. Derivative is evaluated with a 2nd order central
finite difference."""
if isinstance(s, str):
try:
s = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}[s]
except KeyError:
raise KeyError("s must be one of 'x', 'y', 'z' or a vector") from None
s = np.array(s, dtype=float)
s /= np.sqrt(np.dot(s, s))
r = _broadcast(r)
rp = ((r.T) + s * ds).T
rm = ((r.T) - s * ds).T
return (self.B(rp, I) - self.B(rm, I)) / (2 * ds)
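    # For example, obj.dB((0, 0, 0), I=1.0, s='z') evaluates the 3-vector dB/dz at the
    # origin for a 1 A current, using the central difference above.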
def surfaces(self):
return [self.pos_to_lab(pts) for pts in self.local_surfaces()]
def lines(self):
return [self.pos_to_lab(pts) for pts in self.local_lines()]
def local_surfaces(self):
return []
def local_lines(self):
return []
def show(
self, surfaces=True, lines=False, color=torc.COPPER, tube_radius=1e-3, **kwargs
):
from mayavi.mlab import mesh, plot3d
if surfaces:
surfaces = self.surfaces()
for x, y, z in surfaces:
surf = mesh(x, y, z, color=color, **kwargs)
surf.actor.property.specular = 1.0
surf.actor.property.specular_power = 128.0
if lines:
lines = self.lines()
for x, y, z in lines:
surf = plot3d(x, y, z, color=color, tube_radius=tube_radius, **kwargs)
surf.actor.property.specular = 0.0
surf.actor.property.specular_power = 10.0
def __str__(self):
return _formatobj(self, 'name')
def __repr__(self):
return self.__str__()
class Container(CurrentObject):
def __init__(
self,
*children,
r0=(0, 0, 0),
zprime=(0, 0, 1),
xprime=None,
n_turns=1,
name=None,
):
super().__init__(
r0=r0, zprime=zprime, xprime=xprime, n_turns=n_turns, name=name
)
self.children = list(children)
def add(self, *children):
for child in children:
self.children.append(child)
def __getitem__(self, key):
if isinstance(key, (int, np.integer, slice)):
return self.children[key]
elif isinstance(key, str):
for child in self.children:
if child.name == key:
return child
raise KeyError(f"no object in container with name {key}")
else:
msg = f"""Can only look up objects in container by integer index or string
name, not {type(key)} {key}"""
raise TypeError(' '.join(msg.split()))
def __delitem__(self, key):
if isinstance(key, (int, np.integer, slice)):
del self.children[key]
elif isinstance(key, str):
for child in self.children:
if child.name == key:
                    self.children.remove(child)
                    return
            raise KeyError(f"no object in container with name {key}")
else:
msg = f"""Can only look up objects in container by integer index or string
name, not {type(key)} {key}"""
raise TypeError(' '.join(msg.split()))
def __len__(self):
return len(self.children)
def index(self, item):
return self.children.index(item)
def B(self, r, I):
Bs = []
for child in self.children:
Bs.append(child.B(r, I))
return sum(Bs)
def surfaces(self):
surfaces = super().surfaces()
for child in self.children:
surfaces.extend(child.surfaces())
return surfaces
def lines(self):
lines = super().lines()
for child in self.children:
lines.extend(child.lines())
return lines
class Loop(CurrentObject):
def __init__(self, r0, n, R, n_turns=1, name=None):
"""Counterclockwise current loop of radius R, centred at r0 = (x0, y0, z0) with
normal vector n=(nx, ny, nz)"""
super().__init__(r0=r0, zprime=n, n_turns=n_turns, name=name)
self.R = R
def B_local(self, rprime, I):
"""Field due to the loop at position rprime=(xprime, yprime, zprime) for current
I"""
xprime, yprime, zprime = rprime
# Expression we need to call is in cylindrical coordinates:
rho = np.sqrt(xprime ** 2 + yprime ** 2)
B_rho, B_zprime = field_of_current_loop(rho, zprime, self.R, I)
phi = np.arctan2(yprime, xprime)
B_xprime = B_rho * np.cos(phi)
B_yprime = B_rho * np.sin(phi)
return np.array([B_xprime, B_yprime, B_zprime])
def local_lines(self):
theta = np.linspace(-pi, pi, 361)
xprime = self.R * np.cos(theta)
yprime = self.R * np.sin(theta)
zprime = 0
return [(xprime, yprime, zprime)]
class Line(CurrentObject):
def __init__(self, r0, r1, n_turns=1, name=None):
"""Current line from r0 = (x0, y0, z0) to r1 = (x1, y1, z1) with current flowing
from the former to the latter"""
zprime = np.array(r1) - np.array(r0)
super().__init__(r0=r0, zprime=zprime, n_turns=n_turns, name=name)
self.L = np.sqrt(((np.array(r1) - np.array(r0)) ** 2).sum())
def B_local(self, rprime, I):
"""Field due to the loop at position rprime=(xprime, yprime, zprime) for current
I"""
xprime, yprime, zprime = rprime
# Expression we need to call is in cylindrical coordinates:
rho = np.sqrt(xprime ** 2 + yprime ** 2)
B_phi = field_of_current_line(rho, zprime, self.L, I)
phi = np.arctan2(yprime, xprime)
B_xprime = -B_phi * np.sin(phi)
B_yprime = B_phi * np.cos(phi)
return np.array([B_xprime, B_yprime, np.zeros_like(B_xprime)])
def local_lines(self):
zprime = np.array([0, self.L], dtype=float)
xprime = yprime = 0
return [(xprime, yprime, zprime)]
class Arc(Container):
def __init__(
self,
r0,
n,
n_perp,
R,
phi_0,
phi_1,
n_turns=1,
n_segs=DEFAULT_ARC_SEGS,
name=None,
):
"""Current arc forming part of a loop centred at r0 with normal vector n, from
        angle phi_0 to phi_1 defined with respect to the direction n_perp, which
should be a direction perpendicular to n. Current is flowing from phi_0 to
phi_1, which if phi_0 < phi_1, is in the positive sense with respect to the
normal direction n. This arc is constructed out of n_seg separate line segments,
so the accuracy can be increased by increasing n_seg."""
super().__init__(r0=r0, zprime=n, xprime=n_perp, n_turns=n_turns, name=name)
self.R = R
self.phi_0 = phi_0
self.phi_1 = phi_1
delta_phi = (phi_1 - phi_0) / n_segs
for i in range(n_segs):
phi_seg_start = phi_0 + i * delta_phi
phi_seg_stop = phi_0 + (i + 1) * delta_phi
xprime0 = R * np.cos(phi_seg_start)
yprime0 = R * np.sin(phi_seg_start)
xprime1 = R * np.cos(phi_seg_stop)
yprime1 = R * np.sin(phi_seg_stop)
r0_seg = self.pos_to_lab((xprime0, yprime0, 0))
r1_seg = self.pos_to_lab((xprime1, yprime1, 0))
self.add(Line(r0_seg, r1_seg, n_turns=n_turns))
def local_lines(self):
n_theta = int((self.phi_1 - self.phi_0) / (pi / 36)) + 1 # ~every 5 degrees
theta = np.linspace(self.phi_0, self.phi_1, n_theta)
xprime = self.R * np.cos(theta)
yprime = self.R * np.sin(theta)
zprime = 0
return [(xprime, yprime, zprime)]
class RoundCoil(Container):
def __init__(
self,
r0,
n,
R_inner,
R_outer,
height,
n_turns=1,
cross_sec_segs=DEFAULT_CROSS_SEC_SEGS,
name=None,
):
"""A round loop of conductor with rectangular cross section, centred at r0 with
normal vector n, inner radius R_inner, outer radius R_outer, and the given
height (in the normal direction). The finite cross-section is approximated using
a number cross_sec_segs of 1D current loops distributed evenly through the cross
section. n_turns is an overall multiplier for the current used in field
calculations"""
super().__init__(r0=r0, zprime=n, n_turns=n_turns, name=name)
self.R_inner = R_inner
self.R_outer = R_outer
self.height = height
n_turns_per_seg = self.n_turns / cross_sec_segs
segs = _segments(R_inner, R_outer, -height / 2, height / 2, cross_sec_segs)
for R, zprime in segs:
r0_loop = self.pos_to_lab((0, 0, zprime))
self.add(Loop(r0_loop, n, R, n_turns=n_turns_per_seg))
def local_surfaces(self):
# Create arrays (in local coordinates) describing surfaces of the coil for
# plotting:
n_theta = 73 # 73 is every 5 degrees
r, zprime, theta = _rectangular_tube(
self.R_inner,
self.R_outer,
-self.height / 2,
self.height / 2,
-pi,
pi,
n_theta,
)
xprime = r * np.cos(theta)
yprime = r * np.sin(theta)
return [(xprime, yprime, zprime)]
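# Example (illustrative dimensions): a 100-turn round coil with its axis along z, inner
# radius 20 mm, outer radius 30 mm and height 10 mm, and its field at the centre for a
# 1 A current:
#
#   coil = RoundCoil((0, 0, 0), (0, 0, 1), 0.020, 0.030, 0.010, n_turns=100)
#   Bx, By, Bz = coil.B((0, 0, 0), I=1.0)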
class StraightSegment(Container):
def __init__(
self,
r0,
r1,
n,
width,
height,
n_turns=1,
cross_sec_segs=DEFAULT_CROSS_SEC_SEGS,
name=None,
):
"""A straight segment of conductor, with current flowing in a rectangular cross
section centred on the line from r0 to r1. A vector n normal to the direction of
current flow determines which direction the 'width' refers to, the height refers
to the size of the conductor in the remaining direction. The finite
cross-section is approximated using a number cross_sec_segs of 1D current lines
distributed evenly through the cross section. n_turns is an overall multiplier
for the current used in field calculations"""
r0 = np.array(r0, dtype=float)
r1 = np.array(r1, dtype=float)
super().__init__(r0=r0, zprime=r1 - r0, xprime=n, n_turns=n_turns, name=name)
self.width = width
self.height = height
self.L = np.sqrt(((np.array(r1) - np.array(r0)) ** 2).sum())
n_turns_per_seg = self.n_turns / cross_sec_segs
segs = _segments(-width / 2, width / 2, -height / 2, height / 2, cross_sec_segs)
for xprime, yprime in segs:
r0_line = self.pos_to_lab((xprime, yprime, 0))
r1_line = self.pos_to_lab((xprime, yprime, self.L))
self.add(Line(r0_line, r1_line, n_turns=n_turns_per_seg))
def local_surfaces(self):
# Create arrays (in local coordinates) describing surfaces of the segment for
# plotting:
xprime, yprime, zprime = _rectangular_tube(
-self.width / 2,
self.width / 2,
-self.height / 2,
self.height / 2,
0,
self.L,
2,
)
return [(xprime, yprime, zprime)]
class CurvedSegment(Container):
def __init__(
self,
r0,
n,
n_perp,
R_inner,
R_outer,
height,
phi_0,
phi_1,
n_turns=1,
cross_sec_segs=DEFAULT_CROSS_SEC_SEGS,
arc_segs=DEFAULT_ARC_SEGS,
name=None,
):
"""Rounded segment of conductor with rectangular cross section, forming part of
        a round coil centred at r0 with normal vector n, from angle phi_0 to phi_1
defined with respect to the direction n_perp, which should be a direction
perpendicular to n. Current is flowing from phi_0 to phi_1, which if phi_0 <
phi_1, is in the positive sense with respect to the normal direction n. The
finite cross-section is approximated using a number cross_sec_segs of 1D current
arcs distributed evenly through the cross section, each itself approximated as
arc_segs separate current lines. n_turns is an overall multiplier for the
current used in field calculations"""
super().__init__(r0=r0, zprime=n, xprime=n_perp, n_turns=n_turns, name=name)
self.R_inner = R_inner
self.R_outer = R_outer
self.height = height
self.phi_0 = phi_0
self.phi_1 = phi_1
n_turns_per_seg = self.n_turns / cross_sec_segs
segs = _segments(R_inner, R_outer, -height / 2, height / 2, cross_sec_segs)
for R, zprime in segs:
r0_arc = self.pos_to_lab((0, 0, zprime))
self.add(Arc(r0_arc, n, n_perp, R, phi_0, phi_1, n_turns_per_seg, arc_segs))
def local_surfaces(self):
# Create arrays (in local coordinates) describing surfaces of the segment for
# plotting:
n_theta = int((self.phi_1 - self.phi_0) / (pi / 36)) + 1 # ~every 5 degrees
r, zprime, theta = _rectangular_tube(
self.R_inner,
self.R_outer,
-self.height / 2,
self.height / 2,
self.phi_0,
self.phi_1,
n_theta,
)
xprime = r * np.cos(theta)
yprime = r * np.sin(theta)
return [(xprime, yprime, zprime)]
class RacetrackCoil(Container):
def __init__(
self,
r0,
n,
n_perp,
width,
length,
height,
R_inner,
R_outer,
n_turns=1,
arc_segs=DEFAULT_ARC_SEGS,
cross_sec_segs=DEFAULT_CROSS_SEC_SEGS,
name=None,
):
"""A rectangular cross section coil comprising four straight segments and four
90-degree curved segments. The coil is centred at r0 with normal vector n, and
        has the given height in the normal direction. n_perp defines the direction along
which 'width' gives the distance between the inner surfaces of two straight
segments. 'length' gives the distance between the inner surfaces of the other
two straight segments. R_inner and R_outer are the inner and outer radii of
curvature of the curved segments. The finite cross-section is approximated using
a number cross_sec_segs of 1D current lines and arcs distributed evenly through
the cross section, and each arc is further approximated as arc_segs separate
current lines. n_turns is an overall multiplier for the current used in field
calculations"""
super().__init__(r0=r0, zprime=n, xprime=n_perp, n_turns=n_turns, name=name)
self.width = width
self.length = length
self.height = height
self.R_inner = R_inner
self.R_outer = R_outer
for xprime, yprime, phi_0, phi_1 in [
[width / 2 - R_inner, length / 2 - R_inner, 0, pi / 2],
[-width / 2 + R_inner, length / 2 - R_inner, pi / 2, pi],
[-width / 2 + R_inner, -length / 2 + R_inner, pi, 3 * pi / 2],
[width / 2 - R_inner, -length / 2 + R_inner, 3 * pi / 2, 2 * pi],
]:
self.add(
CurvedSegment(
self.pos_to_lab((xprime, yprime, 0)),
n,
n_perp,
R_inner,
R_outer,
height,
phi_0,
phi_1,
n_turns=self.n_turns,
cross_sec_segs=cross_sec_segs,
arc_segs=arc_segs,
)
)
# Top and bottom bars:
absxprime = width / 2 - R_inner
absyprime = (length + R_outer - R_inner) / 2
if absxprime != 0: # Exclude this segment if its length is zero:
for sign in [-1, +1]: # bottom, top
xprime0 = sign * absxprime
xprime1 = -sign * absxprime
yprime = sign * absyprime
self.add(
StraightSegment(
self.pos_to_lab((xprime0, yprime, 0)),
self.pos_to_lab((xprime1, yprime, 0)),
self.vector_to_lab((0, 1, 0)),
self.R_outer - self.R_inner,
self.height,
n_turns=n_turns,
cross_sec_segs=cross_sec_segs,
)
)
# Left and right bars
absyprime = length / 2 - R_inner
absxprime = (width + R_outer - R_inner) / 2
if absyprime != 0: # Exclude this segment if its length is zero:
for sign in [-1, +1]: # Left, right
yprime0 = -sign * absyprime
yprime1 = sign * absyprime
xprime = sign * absxprime
self.add(
StraightSegment(
self.pos_to_lab((xprime, yprime0, 0)),
self.pos_to_lab((xprime, yprime1, 0)),
self.vector_to_lab((1, 0, 0)),
self.R_outer - self.R_inner,
self.height,
n_turns=n_turns,
cross_sec_segs=cross_sec_segs,
)
)
class CoilPair(Container):
def __init__(self, coiltype, r0, n, displacement, *args, **kwargs):
"""A pair of coils of the given type (any class accepting r0 and n as its first
instantion arguments) centred on r0. One coil is at (r0 + displacement * n) and
has normal vector n, and the other is at (r0 - displacement * n). The second
coil has normal vector n if parity is 1 or the string 'helmholtz', and has
normal vector -n if parity is -1 or the string 'anti-helmholtz'. Remaining
arguments and keyword arguments will be passed to coiltype()."""
name = kwargs.pop('name', None)
super().__init__(r0=r0, zprime=n, name=name)
parity = kwargs.pop('parity', 'helmholtz')
if parity not in [+1, -1]:
if parity == 'helmholtz':
parity = +1
elif parity == 'anti-helmholtz':
parity = -1
else:
msg = "parity must be 'helmholtz' or 'anti-helmholtz' (or +/-1)."
raise ValueError(msg)
for unit_vec in [self.zprime, -self.zprime]:
r0_coil = r0 + displacement * unit_vec
n_coil = self.zprime if parity == +1 else unit_vec
self.add(coiltype(r0_coil, n_coil, *args, **kwargs))
def show(*args, **kwargs):
"""Wrapper around mayavi.mlab.show, passing all args and kwargs to it. Provided for
conveneience. This function imports mayavi only when called, so that mayavi is not
imported even if not being used"""
from mayavi.mlab import show
show(*args, **kwargs)
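# A minimal construction sketch (not part of the original module; all numerical values
# are illustrative). It builds an anti-Helmholtz pair of racetrack coils centred on the
# origin with their common axis along z; field evaluation and plotting use functionality
# defined earlier in this module:
#
#   pair = CoilPair(
#       RacetrackCoil,
#       (0, 0, 0),    # r0: centre of the pair
#       (0, 0, 1),    # n: common axis
#       0.05,         # displacement of each coil from r0 along n
#       (1, 0, 0),    # n_perp, forwarded to RacetrackCoil
#       0.08,         # width
#       0.12,         # length
#       0.01,         # height
#       0.02,         # R_inner
#       0.03,         # R_outer
#       n_turns=100,
#       parity='anti-helmholtz',
#   )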
| 38.785714 | 88 | 0.585455 |
4a228bfa9195575510ab02c7a2da0e14b9aed30e | 24,698 | py | Python | blocks.py | iksungk/dynamical-machine-learning-limited-angular-views | 85b9ebcc674375cf38b7546ea549ea4d34f2a05d | [
"MIT"
] | null | null | null | blocks.py | iksungk/dynamical-machine-learning-limited-angular-views | 85b9ebcc674375cf38b7546ea549ea4d34f2a05d | [
"MIT"
] | null | null | null | blocks.py | iksungk/dynamical-machine-learning-limited-angular-views | 85b9ebcc674375cf38b7546ea549ea4d34f2a05d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
from tensorflow.keras.layers import Activation, Add, Dense, BatchNormalization, Concatenate, Dropout, Subtract, Flatten, Input, Lambda, Reshape
from tensorflow.keras.layers import Conv3D, Conv3DTranspose, MaxPool3D, AveragePooling3D, UpSampling3D, ConvLSTM2D
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, MaxPool2D, AveragePooling2D, UpSampling2D
from tensorflow.keras.layers import Layer, RepeatVector, Permute, Multiply, LeakyReLU
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, LearningRateScheduler, ModelCheckpoint, Callback, CSVLogger
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras import optimizers, regularizers
from tensorflow.keras import backend as K
from separable_layers import BiasLayer, SeparableConv3D, SeparableConv3DTranspose
import scipy.io as sio
import tensorflow as tf
import numpy as np
import sys
class SeparableConvGRU3D(Layer):
def __init__(self, convgru3d_filter, is_separable, reg=1e-4):
super(SeparableConvGRU3D, self).__init__()
self.is_separable = is_separable
self.convgru3d_filter = convgru3d_filter
self.kernel_size = 3
self.strides = (1,1,1)
self.dilation_rate = (1,1,1)
self.sigmoid = Activation('sigmoid')
self.tanh = Activation('tanh')
self.relu = Activation('relu')
self.Wr = SeparableConv3D(conv_filter=self.convgru3d_filter[0],
kernel_size=self.kernel_size,
strides=self.strides,
dilation_rate=self.dilation_rate,
use_bias=False,
is_separable=self.is_separable)
self.Ur = SeparableConv3D(conv_filter=self.convgru3d_filter[0],
kernel_size=self.kernel_size,
strides=self.strides,
dilation_rate=self.dilation_rate,
use_bias=False,
is_separable=self.is_separable)
self.br = BiasLayer()
self.Wz = SeparableConv3D(conv_filter=self.convgru3d_filter[0],
kernel_size=self.kernel_size,
strides=self.strides,
dilation_rate=self.dilation_rate,
use_bias=False,
is_separable=self.is_separable)
self.Uz = SeparableConv3D(conv_filter=self.convgru3d_filter[0],
kernel_size=self.kernel_size,
strides=self.strides,
dilation_rate=self.dilation_rate,
use_bias=False,
is_separable=self.is_separable)
self.bz = BiasLayer()
self.bh = BiasLayer()
self.W = SeparableConv3D(conv_filter=self.convgru3d_filter[0],
kernel_size=self.kernel_size,
strides=self.strides,
dilation_rate=self.dilation_rate,
use_bias=False,
is_separable=self.is_separable)
self.U = SeparableConv3D(conv_filter=self.convgru3d_filter[0], kernel_size=self.kernel_size,
strides=self.strides,
dilation_rate=self.dilation_rate,
use_bias=False,
is_separable=self.is_separable)
def call(self, x, h):
        # Reset gate r and update gate z, each computed from the current input x and
        # the previous hidden state h.
        r = self.sigmoid(self.br(Add()([self.Wr(x), self.Ur(h)])))
        z = self.sigmoid(self.bz(Add()([self.Wz(x), self.Uz(h)])))
        # Candidate state: the previous state is masked by the reset gate before being
        # combined with the input (a ReLU is used here in place of the usual tanh).
        r_x_h = self.U(Multiply()([r, h]))
        th = self.relu(self.bh(Add()([self.W(x), r_x_h])))
        # New hidden state: the convex combination h' = z * h + (1 - z) * th.
        ones_tensor = tf.constant(value=1.0, shape=z.shape, dtype=z.dtype)
        cz = Subtract()([ones_tensor, z])
        z_x_h = Multiply()([z, h])
        cz_x_th = Multiply()([cz, th])
        h = Add()([z_x_h, cz_x_th])
return h
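# Example of a single recurrent step (a sketch, not part of the original code; the
# 4 x 4 x 4 latent grid follows the comment below, and SeparableConv3D is assumed to
# preserve spatial size, i.e. 'same' padding):
#
#   cell = SeparableConvGRU3D(convgru3d_filter=[96], is_separable=True)
#   h0 = tf.zeros((1, 4, 4, 4, 96))           # initial hidden state
#   x = tf.random.normal((1, 4, 4, 4, 96))    # encoded view at one time step
#   h1 = cell(x, h0)                          # updated state, same shape as h0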
# Input sequence: N_view x 64 x 64 x 4 x 1
# Input to ConvLSTM3D: N_view x 4 x 4 x 4 x M
# n_convfilter = [32, 48, 64, 96]
class encoder(Layer):
def __init__(self, n_convfilter, is_separable, reg=1e-4, dropout_rate=2e-2):
super(encoder, self).__init__()
self.is_separable = is_separable
self.n_convfilter = n_convfilter
self.sigmoid = Activation('sigmoid')
self.tanh = Activation('tanh')
self.relu = Activation('relu')
self.drop = Dropout(dropout_rate)
self.bn1a = BatchNormalization()
self.bn1b = BatchNormalization()
self.bn1c = BatchNormalization()
self.bn1d = BatchNormalization()
self.conv1a = SeparableConv3D(conv_filter=self.n_convfilter[0], kernel_size=3, strides=(1,2,2), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv1b = SeparableConv3D(conv_filter=self.n_convfilter[0], kernel_size=3, strides=(1,1,1), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv1c = SeparableConv3D(conv_filter=self.n_convfilter[0], kernel_size=1, strides=(1,2,2), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv1d = SeparableConv3D(conv_filter=self.n_convfilter[0], kernel_size=3, strides=(1,1,1), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv1e = SeparableConv3D(conv_filter=self.n_convfilter[0], kernel_size=3, strides=(1,1,1), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.bn2a = BatchNormalization()
self.bn2b = BatchNormalization()
self.bn2c = BatchNormalization()
self.bn2d = BatchNormalization()
self.conv2a = SeparableConv3D(conv_filter=self.n_convfilter[1], kernel_size=3, strides=(1,2,2), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv2b = SeparableConv3D(conv_filter=self.n_convfilter[1], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
self.conv2c = SeparableConv3D(conv_filter=self.n_convfilter[1], kernel_size=1, strides=(1,2,2), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv2d = SeparableConv3D(conv_filter=self.n_convfilter[1], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
self.conv2e = SeparableConv3D(conv_filter=self.n_convfilter[1], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
self.bn3a = BatchNormalization()
self.bn3b = BatchNormalization()
self.bn3c = BatchNormalization()
self.bn3d = BatchNormalization()
self.conv3a = SeparableConv3D(conv_filter=self.n_convfilter[2], kernel_size=3, strides=(1,2,2), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv3b = SeparableConv3D(conv_filter=self.n_convfilter[2], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
self.conv3c = SeparableConv3D(conv_filter=self.n_convfilter[2], kernel_size=1, strides=(1,2,2), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv3d = SeparableConv3D(conv_filter=self.n_convfilter[2], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
self.conv3e = SeparableConv3D(conv_filter=self.n_convfilter[2], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
self.bn4a = BatchNormalization()
self.bn4b = BatchNormalization()
self.bn4c = BatchNormalization()
self.bn4d = BatchNormalization()
self.conv4a = SeparableConv3D(conv_filter=self.n_convfilter[3], kernel_size=3, strides=(1,2,2), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv4b = SeparableConv3D(conv_filter=self.n_convfilter[3], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
self.conv4c = SeparableConv3D(conv_filter=self.n_convfilter[3], kernel_size=1, strides=(1,2,2), dilation_rate = (1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv4d = SeparableConv3D(conv_filter=self.n_convfilter[3], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
self.conv4e = SeparableConv3D(conv_filter=self.n_convfilter[3], kernel_size=3, strides=(1,1,1), dilation_rate = (1,2,2),
use_bias=True, is_separable=self.is_separable)
def call(self, x):
# Down-residual block 1
bn1a = self.bn1a(x)
relu1a = self.relu(bn1a)
conv1a = self.conv1a(relu1a)
bn1b = self.bn1b(conv1a)
relu1b = self.relu(bn1b)
conv1b = self.conv1b(relu1b)
conv1c = self.conv1c(x)
add1a = Add()([conv1b, conv1c])
bn1c = self.bn1c(add1a)
relu1c = self.relu(bn1c)
conv1d = self.conv1d(relu1c)
bn1d = self.bn1d(conv1d)
relu1d = self.relu(bn1d)
conv1e = self.conv1e(relu1d)
add1b = Add()([add1a, conv1e])
d1_out = self.drop(add1b)
# Down-residual block 2
bn2a = self.bn2a(d1_out)
relu2a = self.relu(bn2a)
conv2a = self.conv2a(relu2a)
bn2b = self.bn2b(conv2a)
relu2b = self.relu(bn2b)
conv2b = self.conv2b(relu2b)
conv2c = self.conv2c(d1_out)
add2a = Add()([conv2b, conv2c])
bn2c = self.bn2c(add2a)
relu2c = self.relu(bn2c)
conv2d = self.conv2d(relu2c)
bn2d = self.bn2d(conv2d)
relu2d = self.relu(bn2d)
conv2e = self.conv2e(relu2d)
add2b = Add()([add2a, conv2e])
d2_out = self.drop(add2b)
# Down-residual block 3
bn3a = self.bn3a(d2_out)
relu3a = self.relu(bn3a)
conv3a = self.conv3a(relu3a)
bn3b = self.bn3b(conv3a)
relu3b = self.relu(bn3b)
conv3b = self.conv3b(relu3b)
conv3c = self.conv3c(d2_out)
add3a = Add()([conv3b, conv3c])
bn3c = self.bn3c(add3a)
relu3c = self.relu(bn3c)
conv3d = self.conv3d(relu3c)
bn3d = self.bn3d(conv3d)
relu3d = self.relu(bn3d)
conv3e = self.conv3e(relu3d)
add3b = Add()([add3a, conv3e])
d3_out = self.drop(add3b)
# Down-residual block 4
bn4a = self.bn4a(d3_out)
relu4a = self.relu(bn4a)
conv4a = self.conv4a(relu4a)
bn4b = self.bn4b(conv4a)
relu4b = self.relu(bn4b)
conv4b = self.conv4b(relu4b)
conv4c = self.conv4c(d3_out)
add4a = Add()([conv4b, conv4c])
bn4c = self.bn4c(add4a)
relu4c = self.relu(bn4c)
conv4d = self.conv4d(relu4c)
bn4d = self.bn4d(conv4d)
relu4d = self.relu(bn4d)
conv4e = self.conv4e(relu4d)
add4b = Add()([add4a, conv4e])
d4_out = self.drop(add4b)
return d4_out
class decoder(Layer):
def __init__(self, n_deconvfilter, is_separable, reg=1e-4, dropout_rate=2e-2):
super(decoder, self).__init__()
self.n_deconvfilter = n_deconvfilter
self.is_separable = is_separable
self.relu = Activation('relu')
self.drop = Dropout(dropout_rate)
self.bn5a = BatchNormalization()
self.bn5b = BatchNormalization()
self.bn5c = BatchNormalization()
self.bn5d = BatchNormalization()
self.ct5a = SeparableConv3DTranspose(conv_filter=n_deconvfilter[0], kernel_size=3, strides=(1,2,2), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv5a = SeparableConv3D(conv_filter=n_deconvfilter[1], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.ct5b = SeparableConv3DTranspose(conv_filter=n_deconvfilter[1], kernel_size=2, strides=(1,2,2), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv5b = SeparableConv3D(conv_filter=n_deconvfilter[1], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv5c = SeparableConv3D(conv_filter=n_deconvfilter[1], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.bn6a = BatchNormalization()
self.bn6b = BatchNormalization()
self.bn6c = BatchNormalization()
self.bn6d = BatchNormalization()
self.ct6a = SeparableConv3DTranspose(conv_filter=n_deconvfilter[2], kernel_size=3, strides=(1,2,2), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv6a = SeparableConv3D(conv_filter=n_deconvfilter[2], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.ct6b = SeparableConv3DTranspose(conv_filter=n_deconvfilter[2], kernel_size=2, strides=(1,2,2), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv6b = SeparableConv3D(conv_filter=n_deconvfilter[2], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv6c = SeparableConv3D(conv_filter=n_deconvfilter[2], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.bn7a = BatchNormalization()
self.bn7b = BatchNormalization()
self.bn7c = BatchNormalization()
self.bn7d = BatchNormalization()
self.ct7a = SeparableConv3DTranspose(conv_filter=n_deconvfilter[3], kernel_size=3, strides=(1,2,2), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv7a = SeparableConv3D(conv_filter=n_deconvfilter[3], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.ct7b = SeparableConv3DTranspose(conv_filter=n_deconvfilter[3], kernel_size=2, strides=(1,2,2), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv7b = SeparableConv3D(conv_filter=n_deconvfilter[3], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv7c = SeparableConv3D(conv_filter=n_deconvfilter[3], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.bn8a = BatchNormalization()
self.bn8b = BatchNormalization()
self.bn8c = BatchNormalization()
self.bn8d = BatchNormalization()
self.ct8a = SeparableConv3DTranspose(conv_filter=n_deconvfilter[4], kernel_size=3, strides=(1,2,2), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv8a = SeparableConv3D(conv_filter=n_deconvfilter[4], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.ct8b = SeparableConv3DTranspose(conv_filter=n_deconvfilter[4], kernel_size=2, strides=(1,2,2), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv8b = SeparableConv3D(conv_filter=n_deconvfilter[4], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv8c = SeparableConv3D(conv_filter=n_deconvfilter[4], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.bn9a = BatchNormalization()
self.bn9b = BatchNormalization()
self.bn9c = BatchNormalization()
self.bn9d = BatchNormalization()
self.conv9a = SeparableConv3D(conv_filter=n_deconvfilter[4], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv9b = SeparableConv3D(conv_filter=n_deconvfilter[5], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv9c = SeparableConv3D(conv_filter=n_deconvfilter[5], kernel_size=1, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv9d = SeparableConv3D(conv_filter=n_deconvfilter[5], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv9e = SeparableConv3D(conv_filter=n_deconvfilter[5], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv9f = SeparableConv3D(conv_filter=n_deconvfilter[5], kernel_size=1, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.bn10a = BatchNormalization()
self.bn10b = BatchNormalization()
self.bn10c = BatchNormalization()
self.bn10d = BatchNormalization()
self.conv10a = SeparableConv3D(conv_filter=n_deconvfilter[6], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv10b = SeparableConv3D(conv_filter=n_deconvfilter[6], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv10c = SeparableConv3D(conv_filter=n_deconvfilter[6], kernel_size=1, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv10d = SeparableConv3D(conv_filter=n_deconvfilter[6], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv10e = SeparableConv3D(conv_filter=n_deconvfilter[6], kernel_size=3, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
self.conv10f = SeparableConv3D(conv_filter=n_deconvfilter[6], kernel_size=1, strides=(1,1,1), dilation_rate=(1,1,1),
use_bias=True, is_separable=self.is_separable)
def call(self, x):
# Up-residual block 1
bn5a = self.bn5a(x)
relu5a = self.relu(bn5a)
ct5a = self.ct5a(relu5a)
bn5b = self.bn5b(ct5a)
relu5b = self.relu(bn5b)
conv5a = self.conv5a(relu5b)
ct5b = self.ct5b(x)
add5a = Add()([conv5a, ct5b])
bn5c = self.bn5c(add5a)
relu5c = self.relu(bn5c)
conv5b = self.conv5b(relu5c)
bn5d = self.bn5d(conv5b)
relu5d = self.relu(bn5d)
conv5c = self.conv5c(relu5d)
add5b = Add()([conv5c, add5a])
# u1_out = Concatenate()([add5b, d3_out])
u1_out = self.drop(add5b)
# Up-residual block 2
bn6a = self.bn6a(u1_out)
relu6a = self.relu(bn6a)
ct6a = self.ct6a(relu6a)
bn6b = self.bn6b(ct6a)
relu6b = self.relu(bn6b)
conv6a = self.conv6a(relu6b)
ct6b = self.ct6b(u1_out)
add6a = Add()([conv6a, ct6b])
bn6c = self.bn6c(add6a)
relu6c = self.relu(bn6c)
conv6b = self.conv6b(relu6c)
bn6d = self.bn6d(conv6b)
relu6d = self.relu(bn6d)
conv6c = self.conv6c(relu6d)
add6b = Add()([add6a, conv6c])
# u2_out = Concatenate()([add6b, d2_out])
u2_out = self.drop(add6b)
# Up-residual block 3
bn7a = self.bn7a(u2_out)
relu7a = self.relu(bn7a)
ct7a = self.ct7a(relu7a)
bn7b = self.bn7b(ct7a)
relu7b = self.relu(bn7b)
conv7a = self.conv7a(relu7b)
ct7b = self.ct7b(u2_out)
add7a = Add()([conv7a, ct7b])
bn7c = self.bn7c(add7a)
relu7c = self.relu(bn7c)
conv7b = self.conv7b(relu7c)
bn7d = self.bn7d(conv7b)
relu7d = self.relu(bn7d)
conv7c = self.conv7c(relu7d)
add7b = Add()([add7a, conv7c])
# u3_out = Concatenate()([add7b, d1_out])
u3_out = self.drop(add7b)
# Up-residual block 4
bn8a = self.bn8a(u3_out)
relu8a = self.relu(bn8a)
ct8a = self.ct8a(relu8a)
bn8b = self.bn8b(ct8a)
relu8b = self.relu(bn8b)
conv8a = self.conv8a(relu8b)
ct8b = self.ct8b(u3_out)
add8a = Add()([conv8a, ct8b])
bn8c = self.bn8c(add8a)
relu8c = self.relu(bn8c)
conv8b = self.conv8b(relu8c)
bn8d = self.bn8d(conv8b)
relu8d = self.relu(bn8d)
conv8c = self.conv8c(relu8d)
add8b = Add()([add8a, conv8c])
# u4_out = Concatenate()([add8b, x])
u4_out = self.drop(add8b)
# Residual block 1
bn9a = self.bn9a(u4_out)
relu9a = self.relu(bn9a)
conv9a = self.conv9a(relu9a)
bn9b = self.bn9b(conv9a)
relu9b = self.relu(bn9b)
conv9b = self.conv9b(relu9b)
conv9c = self.conv9c(u4_out)
add9a = Add()([conv9b, conv9c])
bn9c = self.bn9c(add9a)
relu9c = self.relu(bn9c)
conv9d = self.conv9d(relu9c)
bn9d = self.bn9d(conv9d)
relu9d = self.relu(bn9d)
conv9e = self.conv9e(relu9d)
conv9f = self.conv9f(add9a)
add9b = Add()([conv9e, conv9f])
r1_out = self.drop(add9b)
# Residual block 2
bn10a = self.bn10a(r1_out)
relu10a = self.relu(bn10a)
conv10a = self.conv10a(relu10a)
bn10b = self.bn10b(conv10a)
relu10b = self.relu(bn10b)
conv10b = self.conv10b(relu10b)
conv10c = self.conv10c(r1_out)
add10a = Add()([conv10b, conv10c])
bn10c = self.bn10c(add10a)
relu10c = self.relu(bn10c)
conv10d = self.conv10d(relu10c)
bn10d = self.bn10d(conv10d)
relu10d = self.relu(bn10d)
conv10e = self.conv10e(relu10d)
conv10f = self.conv10f(add10a)
add10b = Add()([conv10e, conv10f])
r2_out = self.drop(add10b)
return r2_out
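# A sketch of how these pieces are intended to fit together (not part of the original
# training code; the filter lists and the 4 x 64 x 64 x 1 per-view shape are assumptions
# based on the comments above, and the separable convolutions are assumed to preserve
# spatial size):
#
#   enc = encoder(n_convfilter=[32, 48, 64, 96], is_separable=True)
#   gru = SeparableConvGRU3D(convgru3d_filter=[96], is_separable=True)
#   dec = decoder(n_deconvfilter=[96, 96, 64, 48, 32, 16, 1], is_separable=True)
#
#   h = tf.zeros((batch, 4, 4, 4, 96))     # recurrent state over the latent grid
#   for t in range(n_views):               # fuse one angular view per step
#       x_t = views[:, t]                  # (batch, 4, 64, 64, 1)
#       h = gru(enc(x_t), h)
#   y = dec(h)                             # (batch, 4, 64, 64, 1) reconstruction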
| 49.396 | 143 | 0.579116 |
4a228c30ca572bc10395721f1398c4746ba1653d | 9,130 | py | Python | assets/src/ba_data/python/bastd/ui/soundtrack/entrytypeselect.py | Dmitry450/ballistica | 27420d3f64c24bf3c9b4b047177a4769977659b1 | [
"MIT"
] | null | null | null | assets/src/ba_data/python/bastd/ui/soundtrack/entrytypeselect.py | Dmitry450/ballistica | 27420d3f64c24bf3c9b4b047177a4769977659b1 | [
"MIT"
] | null | null | null | assets/src/ba_data/python/bastd/ui/soundtrack/entrytypeselect.py | Dmitry450/ballistica | 27420d3f64c24bf3c9b4b047177a4769977659b1 | [
"MIT"
] | null | null | null | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Provides UI for selecting soundtrack entry types."""
from __future__ import annotations
import copy
from typing import TYPE_CHECKING
import _ba
import ba
if TYPE_CHECKING:
from typing import Any, Callable, Optional
class SoundtrackEntryTypeSelectWindow(ba.Window):
"""Window for selecting a soundtrack entry type."""
def __init__(self,
callback: Callable[[Any], Any],
current_entry: Any,
selection_target_name: str,
transition: str = 'in_right'):
from ba.internal import (get_soundtrack_entry_type,
supports_soundtrack_entry_type)
self._r = 'editSoundtrackWindow'
self._callback = callback
self._current_entry = copy.deepcopy(current_entry)
self._width = 580
self._height = 220
spacing = 80
do_default = True
do_mac_music_app_playlist = supports_soundtrack_entry_type(
'iTunesPlaylist')
do_music_file = supports_soundtrack_entry_type('musicFile')
do_music_folder = supports_soundtrack_entry_type('musicFolder')
if do_mac_music_app_playlist:
self._height += spacing
if do_music_file:
self._height += spacing
if do_music_folder:
self._height += spacing
super().__init__(root_widget=ba.containerwidget(
size=(self._width, self._height),
transition=transition,
scale=1.7 if ba.app.small_ui else 1.4 if ba.app.med_ui else 1.0))
btn = ba.buttonwidget(parent=self._root_widget,
position=(35, self._height - 65),
size=(160, 60),
scale=0.8,
text_scale=1.2,
label=ba.Lstr(resource='cancelText'),
on_activate_call=self._on_cancel_press)
ba.containerwidget(edit=self._root_widget, cancel_button=btn)
ba.textwidget(parent=self._root_widget,
position=(self._width * 0.5, self._height - 32),
size=(0, 0),
text=ba.Lstr(resource=self._r + '.selectASourceText'),
color=ba.app.title_color,
maxwidth=230,
h_align="center",
v_align="center")
ba.textwidget(parent=self._root_widget,
position=(self._width * 0.5, self._height - 56),
size=(0, 0),
text=selection_target_name,
color=ba.app.infotextcolor,
scale=0.7,
maxwidth=230,
h_align="center",
v_align="center")
v = self._height - 155
current_entry_type = get_soundtrack_entry_type(current_entry)
if do_default:
btn = ba.buttonwidget(parent=self._root_widget,
size=(self._width - 100, 60),
position=(50, v),
label=ba.Lstr(resource=self._r +
'.useDefaultGameMusicText'),
on_activate_call=self._on_default_press)
if current_entry_type == 'default':
ba.containerwidget(edit=self._root_widget, selected_child=btn)
v -= spacing
if do_mac_music_app_playlist:
btn = ba.buttonwidget(
parent=self._root_widget,
size=(self._width - 100, 60),
position=(50, v),
label=ba.Lstr(resource=self._r + '.useITunesPlaylistText'),
on_activate_call=self._on_mac_music_app_playlist_press,
icon=None)
if current_entry_type == 'iTunesPlaylist':
ba.containerwidget(edit=self._root_widget, selected_child=btn)
v -= spacing
if do_music_file:
btn = ba.buttonwidget(parent=self._root_widget,
size=(self._width - 100, 60),
position=(50, v),
label=ba.Lstr(resource=self._r +
'.useMusicFileText'),
on_activate_call=self._on_music_file_press,
icon=ba.gettexture('file'))
if current_entry_type == 'musicFile':
ba.containerwidget(edit=self._root_widget, selected_child=btn)
v -= spacing
if do_music_folder:
btn = ba.buttonwidget(parent=self._root_widget,
size=(self._width - 100, 60),
position=(50, v),
label=ba.Lstr(resource=self._r +
'.useMusicFolderText'),
on_activate_call=self._on_music_folder_press,
icon=ba.gettexture('folder'),
icon_color=(1.1, 0.8, 0.2))
if current_entry_type == 'musicFolder':
ba.containerwidget(edit=self._root_widget, selected_child=btn)
v -= spacing
def _on_mac_music_app_playlist_press(self) -> None:
from ba.internal import (get_soundtrack_entry_type,
get_soundtrack_entry_name)
from bastd.ui.soundtrack import itunes
ba.containerwidget(edit=self._root_widget, transition='out_left')
current_playlist_entry: Optional[str]
if get_soundtrack_entry_type(self._current_entry) == 'iTunesPlaylist':
current_playlist_entry = get_soundtrack_entry_name(
self._current_entry)
else:
current_playlist_entry = None
ba.app.main_menu_window = (itunes.MacMusicAppPlaylistSelectWindow(
self._callback, current_playlist_entry,
self._current_entry).get_root_widget())
def _on_music_file_press(self) -> None:
from ba.internal import get_valid_music_file_extensions
from bastd.ui import fileselector
ba.containerwidget(edit=self._root_widget, transition='out_left')
base_path = _ba.android_get_external_storage_path()
ba.app.main_menu_window = (fileselector.FileSelectorWindow(
base_path,
callback=self._music_file_selector_cb,
show_base_path=False,
valid_file_extensions=get_valid_music_file_extensions(),
allow_folders=False).get_root_widget())
def _on_music_folder_press(self) -> None:
from bastd.ui import fileselector
ba.containerwidget(edit=self._root_widget, transition='out_left')
base_path = _ba.android_get_external_storage_path()
ba.app.main_menu_window = (fileselector.FileSelectorWindow(
base_path,
callback=self._music_folder_selector_cb,
show_base_path=False,
valid_file_extensions=[],
allow_folders=True).get_root_widget())
def _music_file_selector_cb(self, result: Optional[str]) -> None:
if result is None:
self._callback(self._current_entry)
else:
self._callback({'type': 'musicFile', 'name': result})
def _music_folder_selector_cb(self, result: Optional[str]) -> None:
if result is None:
self._callback(self._current_entry)
else:
self._callback({'type': 'musicFolder', 'name': result})
def _on_default_press(self) -> None:
ba.containerwidget(edit=self._root_widget, transition='out_right')
self._callback(None)
def _on_cancel_press(self) -> None:
ba.containerwidget(edit=self._root_widget, transition='out_right')
self._callback(self._current_entry)
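# Example of how a caller might use this window (a sketch, not part of the original
# module). The callback later receives either None (use default game music) or an
# entry value such as {'type': 'musicFile', 'name': '/path/to/track.ogg'}:
#
#   def _on_entry_chosen(entry):
#       print('chose soundtrack entry:', entry)
#
#   ba.app.main_menu_window = SoundtrackEntryTypeSelectWindow(
#       callback=_on_entry_chosen,
#       current_entry=existing_entry,   # the entry currently stored for this slot
#       selection_target_name='Menu Music',
#   ).get_root_widget()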
| 44.10628 | 79 | 0.585652 |
4a228cc1ca9a9c9805d97431166eb060c9bebdbb | 40 | py | Python | tests/mocks/pycopy-cpython_core/usocket.py | BradenM/micropython-stubber | 042aee27685dcf0152b6580c005f8a20a04f9d59 | [
"MIT"
] | 126 | 2019-07-19T14:42:41.000Z | 2022-03-21T22:22:19.000Z | tests/mocks/pycopy-cpython_core/usocket.py | BradenM/micropython-stubber | 042aee27685dcf0152b6580c005f8a20a04f9d59 | [
"MIT"
] | 176 | 2020-10-18T14:31:03.000Z | 2022-03-30T23:22:39.000Z | tests/mocks/pycopy-cpython_core/usocket.py | BradenM/micropython-stubber | 042aee27685dcf0152b6580c005f8a20a04f9d59 | [
"MIT"
] | 55 | 2019-08-02T09:32:33.000Z | 2021-12-22T11:25:51.000Z | import micropython
from socket import *
| 13.333333 | 20 | 0.825 |
4a228dd86aee14204e379678e6187c8cb85e7f5d | 1,206 | py | Python | ports/esp32/boards/ESP32_Dev_Board/modules/buzzer.py | tektecher/micropython | f1e76cb820671414ab6bb5dabc084a6b916d3d06 | [
"MIT"
] | null | null | null | ports/esp32/boards/ESP32_Dev_Board/modules/buzzer.py | tektecher/micropython | f1e76cb820671414ab6bb5dabc084a6b916d3d06 | [
"MIT"
] | null | null | null | ports/esp32/boards/ESP32_Dev_Board/modules/buzzer.py | tektecher/micropython | f1e76cb820671414ab6bb5dabc084a6b916d3d06 | [
"MIT"
] | 2 | 2022-02-21T17:07:34.000Z | 2022-03-01T19:45:48.000Z | # Dev by Sonthaya Nongnuch
# Edited and Modified by Saeed Desouky
from machine import Pin, PWM
from time import sleep
note_map = {
"C4": 261,
"C#4": 277,
"D4": 293,
"Eb4": 311,
"E4": 329,
"F4": 349,
"F#4": 369,
"G4": 391,
"G#4": 415,
"A4": 440,
"Bb4": 466,
"B4": 493,
"C5": 523,
"C#5": 554,
"D5": 587,
"Eb5": 622,
"E5": 659,
"F5": 698,
"F#5": 740,
"G5": 784,
"G#5": 831,
"A5": 880,
"Bb5": 932,
"B5": 988,
"C6": 1046,
"C#6": 1109,
"D6": 1175,
"Eb6": 1244,
"E6": 1318,
"F6": 1396,
"F#6": 1480,
"G6": 1568,
"G#6": 1661,
"A6": 1760,
"Bb6": 1865,
"B6": 1976,
"C7": 2093,
"SIL": 0
}
__buzzer = PWM(Pin(12), freq=2000, duty=0)
volume = 50
bpm = 120
def tone(freq=2093, duration=0.5):
__buzzer.freq(int(freq))
__buzzer.duty(int(volume / 100 * 512))
sleep(duration)
__buzzer.duty(0)
def on(freq=2000):
__buzzer.freq(int(freq))
__buzzer.duty(int(volume / 100 * 512))
def off():
__buzzer.duty(0)
def note(notes, duration=4):
quarter_delay = (60 * 1000) / bpm
delay = quarter_delay * duration
delay = delay / 1000 # mS -> S
for note in notes.split(" "):
if note in note_map:
tone(note_map[note], delay)
| 16.985915 | 42 | 0.555556 |
4a228e79f0f4e716999ce5a770c2e188929e0ea6 | 2,158 | py | Python | algDev/db/yf_updater.py | ajmal017/ralph-usa | 41a7f910da04cfa88f603313fad2ff44c82b9dd4 | [
"Apache-2.0"
] | null | null | null | algDev/db/yf_updater.py | ajmal017/ralph-usa | 41a7f910da04cfa88f603313fad2ff44c82b9dd4 | [
"Apache-2.0"
] | 7 | 2021-03-10T10:08:30.000Z | 2022-03-02T07:38:13.000Z | algDev/db/yf_updater.py | ajmal017/ralph-usa | 41a7f910da04cfa88f603313fad2ff44c82b9dd4 | [
"Apache-2.0"
] | 1 | 2020-04-17T19:15:06.000Z | 2020-04-17T19:15:06.000Z | import psycopg2
import credentials
from datetime import datetime, timedelta
import yfinance as yf
def update():
conn = psycopg2.connect(host="localhost",database="postgres", user=credentials.username, password=credentials.password)
conn.autocommit = True
cursor = conn.cursor()
# first, get the most recent date that's in our DB
getDateStatement = "SELECT date FROM Prices WHERE NOT (date < ANY(SELECT DISTINCT date FROM Prices))" # handle enumeration in the DB ya digg
cursor.execute(getDateStatement)
    lastDate = cursor.fetchone() # fetchone() returns the single row as a tuple
    lastDate = lastDate[0] # unpack the date value from that tuple
getTickersStatement = "SELECT DISTINCT ticker FROM Prices ORDER BY ticker"
cursor.execute(getTickersStatement)
tickers = cursor.fetchall()
tickers = [ticker[0] for ticker in tickers]
postgres_insert_query = """ INSERT INTO Prices (ticker, date, open, high, low, close, volume, smavg) VALUES ('{}','{}',{},{},{},{},{},{})"""
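    # For example, formatting the template with the row tuple
    # ('AAPL', '2020-4-16', 71.5, 72.0, 70.9, 71.7, 157000000, "NULL") (illustrative
    # values) produces:
    #   INSERT INTO Prices (ticker, date, open, high, low, close, volume, smavg)
    #   VALUES ('AAPL','2020-4-16',71.5,72.0,70.9,71.7,157000000,NULL)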
for equity in tickers:
tickerName = equity.upper()
print('Updating:', tickerName)
ticker = yf.Ticker(tickerName)
hist = ticker.history(period="max")
for row in hist.iterrows():
try:
currentDate = row[0].to_pydatetime().date()
if currentDate <= lastDate:
continue
dateStr = str(currentDate.year) + '-' + str(currentDate.month) + '-' + str(currentDate.day)
toUse = (equity, dateStr, row[1][0], row[1][1], row[1][2], row[1][3], row[1][4], "NULL")
modified = False
lst = list(toUse)
for i in range(len(toUse)-1):
if not toUse[i]:
modified = True
lst[i] = "NULL"
if modified:
toUse = tuple(lst)
formatted = postgres_insert_query.format(*toUse)
cursor.execute(formatted)
except (Exception, psycopg2.Error) as error:
print ("Error while connecting to PostgreSQL", error)
if(__name__ == "__main__"):
update() | 38.535714 | 144 | 0.581557 |
4a228e8663e4a7a06bd6c02d23c062779de3523c | 9,133 | py | Python | train.py | hfznr/Udacity | 8d496f6e6581d1b069151ad83b523b3f9ab8f90d | [
"MIT"
] | null | null | null | train.py | hfznr/Udacity | 8d496f6e6581d1b069151ad83b523b3f9ab8f90d | [
"MIT"
] | null | null | null | train.py | hfznr/Udacity | 8d496f6e6581d1b069151ad83b523b3f9ab8f90d | [
"MIT"
] | null | null | null | # Imports here
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torchvision
from PIL import Image
from collections import OrderedDict
from os import listdir
import json
from torch import nn,optim
import torch.nn.functional as F
from torchvision import datasets, transforms,models
import argparse
#map labels
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
def arg_parser():
parser = argparse.ArgumentParser(description="Train.py")
parser.add_argument('data_dir', nargs='*', action="store", default="./flowers")
    parser.add_argument('--learning_rate', dest="learning_rate", action="store", type=float, default=0.001)
parser.add_argument('--hidden_units', type=int, dest="hidden_units", action="store", default=120)
parser.add_argument('--arch', dest="arch", action="store", default="vgg16", type = str)
parser.add_argument('--save_dir', dest="save_dir", action="store", default="./checkpoint.pth")
parser.add_argument('--gpu', dest="gpu", action="store", default="gpu")
parser.add_argument('--epochs', dest="epochs", action="store", type=int, default=2)
args = parser.parse_args()
return args
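# Example invocation (illustrative values; data_dir is the positional argument):
#   python train.py ./flowers --arch vgg16 --hidden_units 512 --learning_rate 0.001 --epochs 3 --gpu gpu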
image_datasets = []
dataloaders = []
def load_data(train_dir,valid_dir,test_dir):
# TODO: Define your transforms for the training, validation, and testing sets
training_data_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])])
validation_data_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])])
testing_data_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])])
# TODO: Load the datasets with ImageFolder
train_data = datasets.ImageFolder(train_dir, transform=training_data_transforms) #0
valid_data = datasets.ImageFolder(valid_dir, transform=validation_data_transforms)#1
test_data = datasets.ImageFolder(test_dir, transform=testing_data_transforms)#2
image_datasets.append(train_data)
    image_datasets.append(valid_data)
    image_datasets.append(test_data)
# TODO: Using the image datasets and the trainforms, define the dataloaders
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64,shuffle=True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=64,shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64,shuffle=True)
dataloaders.append(trainloader)
dataloaders.append(validloader)
dataloaders.append(testloader)
def build_model(arch,hidden_value,dropout):
input_size = 25088
output_size = 102
if arch.lower() == "vgg16":
model = models.vgg16(pretrained=True)
model.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_value)),
('relu1', nn.ReLU()),
('dropout', nn.Dropout(dropout)),
('output', nn.Linear(hidden_value, output_size)),
('softmax', nn.LogSoftmax(dim=1))]))
else:
model = models.densenet121(pretrained=True)
input_size =1024
model.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_value)),
('relu1', nn.ReLU()),
('dropout', nn.Dropout(dropout)),
('output', nn.Linear(hidden_value, output_size)),
('softmax', nn.LogSoftmax(dim=1))]))
return model
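# Example (a sketch): build the default VGG16-based model with a custom classifier head.
#   model = build_model("vgg16", hidden_value=512, dropout=0.1)
#   print(model.classifier)   # fc1 (25088 -> 512) -> ReLU -> Dropout -> output (512 -> 102) -> LogSoftmax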
def train_model(model, trainloader, validloader, epochs, print_every,device,learning_rate,criterion,optimizer):
#coppied from transfer learning example
model.to(device)
steps = 0
for epoch in range(epochs):
running_loss = 0
for inputs, labels in trainloader:
steps += 1
# Move input and label tensors to the default device
inputs, labels = inputs.to(device), labels.to(device)
logps = model.forward(inputs)
loss = criterion(logps, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
valid_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
for inputs, labels in validloader:
inputs, labels = inputs.to(device), labels.to(device)
logps = model.forward(inputs)
batch_loss = criterion(logps, labels)
valid_loss += batch_loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
print(f"Epoch {epoch+1}/{epochs}.. "
f"Train loss: {running_loss/print_every:.3f}.. "
f"Validation loss: {valid_loss/len(validloader):.3f}.. "
f"Validation accuracy: {accuracy/len(validloader):.3f}")
running_loss = 0
model.train()
print("Train completed")
return model
def test_network(model, testloader, device,criterion,optimizer):
test_loss = 0
accuracy = 0
model.to(device)
#inputs are images
with torch.no_grad():
for inputs, labels in testloader:
inputs = inputs.to(device)
labels = labels.to(device)
log_ps = model.forward(inputs)
temp_loss = criterion(log_ps, labels)
test_loss += temp_loss.item()
ps = torch.exp(log_ps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
    result = 100 * accuracy/len(testloader)
    print(f"Result of test accuracy: {result} %")
def save_checkpoints(model, save_dir, train_data,criterion,optimizer,epochs,arch,hidden_units,dropout,lr):
model.class_to_idx = train_data.class_to_idx
checkpoint = {'structure' :arch,
'hidden_units':hidden_units,
'dropout':dropout,
'learning_rate':lr,
'no_of_epochs':epochs,
'state_dict':model.state_dict(),
'class_to_idx':model.class_to_idx}
"""
checkpoint = {'structure': 'vgg16',
'input_size': 25088,
'dropout': 0.1,
'output_size': 102,
'learning_rate': 0.001,
'classifier': model.classifier,
'epochs': epochs,
'optimizer': optimizer.state_dict(),
'state_dict': model.state_dict(),
'class_to_idx': model.class_to_idx}"""
torch.save(checkpoint,save_dir)
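# Example of restoring a model from the file written above (a sketch; it mirrors the
# keys stored in the checkpoint dict):
#   checkpoint = torch.load('./checkpoint.pth')
#   model = build_model(checkpoint['structure'], checkpoint['hidden_units'], checkpoint['dropout'])
#   model.load_state_dict(checkpoint['state_dict'])
#   model.class_to_idx = checkpoint['class_to_idx']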
args = arg_parser()
def main():
arch = args.arch
lr = args.learning_rate
hidden_units = args.hidden_units
epochs = args.epochs
gpu = args.gpu
save_dir = args.save_dir
data_dir = args.data_dir[0]
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
load_data(train_dir,valid_dir,test_dir)
train_data = image_datasets[0]
valid_data = image_datasets[1]
test_data = image_datasets[2]
trainloader = dataloaders[0]
validloader = dataloaders[1]
testloader = dataloaders[2]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = build_model(arch,hidden_units,0.1)
criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=lr)
print_every = 5
trained_model = train_model(model, trainloader, validloader, epochs, print_every,device,lr,criterion,optimizer)
for p in model.parameters():
p.requires_grad = False
test_network(trained_model, testloader, device,criterion,optimizer)
save_checkpoints(trained_model, save_dir, train_data,criterion,optimizer,epochs,arch,hidden_units,0.1,lr)
if __name__ == '__main__': main() | 33.454212 | 115 | 0.598927 |
4a228e911668a8659badca960c4bd33d5a86555c | 30,103 | py | Python | Lib/bsddb/dbtables.py | wchangque/bsddb3 | 477f9defcf61b47d2cc769374afff468e4b4a248 | [
"BSD-3-Clause"
] | 3 | 2016-08-25T15:37:55.000Z | 2018-06-26T02:15:24.000Z | Lib/bsddb/dbtables.py | shoemakerdr/bsddb3 | 61bffcd65474ede4d4317dd28789056e83eff541 | [
"BSD-3-Clause"
] | 1 | 2022-02-22T22:23:55.000Z | 2022-02-22T22:23:55.000Z | bsddb3/bsddb3-6.2.6/Lib/bsddb/dbtables.py | mpwillson/spambayes3 | b51d7bb9016066234ce88dad65faabed85f63d78 | [
"PSF-2.0"
] | null | null | null | #-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
# Copyright (C) 2002 Gregory P. Smith
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# -- Gregory P. Smith <[email protected]>
# This provides a simple database table interface built on top of
# the Python Berkeley DB 3 interface.
#
import re
import sys
import copy
import random
import struct
if sys.version_info[0] >= 3 :
import pickle
else :
import warnings
with warnings.catch_warnings() :
warnings.filterwarnings("ignore", category=DeprecationWarning)
import cPickle as pickle
from bsddb3 import db
class TableDBError(StandardError):
pass
class TableAlreadyExists(TableDBError):
pass
class Cond:
"""This condition matches everything"""
def __call__(self, s):
return 1
class ExactCond(Cond):
"""Acts as an exact match condition function"""
def __init__(self, strtomatch):
self.strtomatch = strtomatch
def __call__(self, s):
return s == self.strtomatch
class PrefixCond(Cond):
"""Acts as a condition function for matching a string prefix"""
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, s):
return s[:len(self.prefix)] == self.prefix
class PostfixCond(Cond):
"""Acts as a condition function for matching a string postfix"""
def __init__(self, postfix):
self.postfix = postfix
def __call__(self, s):
return s[-len(self.postfix):] == self.postfix
class LikeCond(Cond):
"""
Acts as a function that will match using an SQL 'LIKE' style
string. Case insensitive and % signs are wild cards.
This isn't perfect but it should work for the simple common cases.
"""
def __init__(self, likestr, re_flags=re.IGNORECASE):
# escape python re characters
chars_to_escape = '.*+()[]?'
for char in chars_to_escape :
likestr = likestr.replace(char, '\\'+char)
# convert %s to wildcards
self.likestr = likestr.replace('%', '.*')
self.re = re.compile('^'+self.likestr+'$', re_flags)
def __call__(self, s):
return self.re.match(s)
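# Example of using the condition classes with bsdTableDB.Select() below (a sketch;
# 'tdb' is an open bsdTableDB instance):
#
#   rows = tdb.Select('frobnications', ['colour', 'size'],
#                     conditions={'colour': ExactCond('blue'),
#                                 'owner': LikeCond('%smith%')})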
#
# keys used to store database metadata
#
_table_names_key = '__TABLE_NAMES__' # list of the tables in this db
_columns = '._COLUMNS__' # table_name+this key contains a list of columns
def _columns_key(table):
return table + _columns
#
# these keys are found within table sub databases
#
_data = '._DATA_.' # this+column+this+rowid key contains table data
_rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each
# row in the table. (no data is stored)
_rowid_str_len = 8 # length in bytes of the unique rowid strings
def _data_key(table, col, rowid):
return table + _data + col + _data + rowid
def _search_col_data_key(table, col):
return table + _data + col + _data
def _search_all_data_key(table):
return table + _data
def _rowid_key(table, rowid):
return table + _rowid + rowid + _rowid
def _search_rowid_key(table):
return table + _rowid
def contains_metastrings(s) :
"""Verify that the given string does not contain any
metadata strings that might interfere with dbtables database operation.
"""
if (s.find(_table_names_key) >= 0 or
s.find(_columns) >= 0 or
s.find(_data) >= 0 or
s.find(_rowid) >= 0):
# Then
return 1
else:
return 0
class bsdTableDB :
def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
recover=0, dbflags=0):
"""bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome Berkeley DB directory.
Use keyword arguments when calling this constructor.
"""
self.db = None
myflags = db.DB_THREAD
if create:
myflags |= db.DB_CREATE
flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
db.DB_INIT_TXN | dbflags)
# DB_AUTO_COMMIT isn't a valid flag for env.open()
try:
dbflags |= db.DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv = flagsforenv | db.DB_RECOVER
self.env = db.DBEnv()
# enable auto deadlock avoidance
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
if truncate:
myflags |= db.DB_TRUNCATE
self.db = db.DB(self.env)
# this code relies on DBCursor.set* methods to raise exceptions
# rather than returning None
self.db.set_get_returns_none(1)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(db.DB_DUP)
self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
if sys.version_info[0] >= 3 :
class cursor_py3k(object) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
def close(self) :
return self._dbcursor.close()
def set_range(self, search) :
v = self._dbcursor.set_range(bytes(search, "iso8859-1"))
if v is not None :
v = (v[0].decode("iso8859-1"),
v[1].decode("iso8859-1"))
return v
def __next__(self) :
v = getattr(self._dbcursor, "next")()
if v is not None :
v = (v[0].decode("iso8859-1"),
v[1].decode("iso8859-1"))
return v
class db_py3k(object) :
def __init__(self, db) :
self._db = db
def cursor(self, txn=None) :
return cursor_py3k(self._db.cursor(txn=txn))
def has_key(self, key, txn=None) :
return getattr(self._db,"has_key")(bytes(key, "iso8859-1"),
txn=txn)
def put(self, key, value, flags=0, txn=None) :
key = bytes(key, "iso8859-1")
if value is not None :
value = bytes(value, "iso8859-1")
return self._db.put(key, value, flags=flags, txn=txn)
def put_bytes(self, key, value, txn=None) :
key = bytes(key, "iso8859-1")
return self._db.put(key, value, txn=txn)
def get(self, key, txn=None, flags=0) :
key = bytes(key, "iso8859-1")
v = self._db.get(key, txn=txn, flags=flags)
if v is not None :
v = v.decode("iso8859-1")
return v
def get_bytes(self, key, txn=None, flags=0) :
key = bytes(key, "iso8859-1")
return self._db.get(key, txn=txn, flags=flags)
def delete(self, key, txn=None) :
key = bytes(key, "iso8859-1")
return self._db.delete(key, txn=txn)
def close (self) :
return self._db.close()
self.db = db_py3k(self.db)
else : # Python 2.x
pass
# Initialize the table names list if this is a new database
txn = self.env.txn_begin()
try:
if not getattr(self.db, "has_key")(_table_names_key, txn):
getattr(self.db, "put_bytes", self.db.put) \
(_table_names_key, pickle.dumps([], 1), txn=txn)
# Yes, bare except
except:
txn.abort()
raise
else:
txn.commit()
# TODO verify more of the database's metadata?
self.__tablecolumns = {}
def __del__(self):
self.close()
def close(self):
if self.db is not None:
self.db.close()
self.db = None
if self.env is not None:
self.env.close()
self.env = None
def checkpoint(self, mins=0):
self.env.txn_checkpoint(mins)
def sync(self):
self.db.sync()
def _db_print(self) :
"""Print the database to stdout for debugging"""
print "******** Printing raw database for debugging ********"
cur = self.db.cursor()
try:
key, data = cur.first()
while 1:
print repr({key: data})
next = cur.next()
if next:
key, data = next
else:
cur.close()
return
except db.DBNotFoundError:
cur.close()
def CreateTable(self, table, columns):
"""CreateTable(table, columns) - Create a new table in the database.
raises TableDBError if it already exists or for other DB errors.
"""
assert isinstance(columns, list)
txn = None
try:
# checking sanity of the table and column names here on
# table creation will prevent problems elsewhere.
if contains_metastrings(table):
raise ValueError(
"bad table name: contains reserved metastrings")
for column in columns :
if contains_metastrings(column):
raise ValueError(
"bad column name: contains reserved metastrings")
columnlist_key = _columns_key(table)
if getattr(self.db, "has_key")(columnlist_key):
raise TableAlreadyExists, "table already exists"
txn = self.env.txn_begin()
# store the table's column info
getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
pickle.dumps(columns, 1), txn=txn)
# add the table name to the tablelist
tablelist = pickle.loads(getattr(self.db, "get_bytes",
self.db.get) (_table_names_key, txn=txn, flags=db.DB_RMW))
tablelist.append(table)
# delete 1st, in case we opened with DB_DUP
self.db.delete(_table_names_key, txn=txn)
getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
txn = None
except db.DBError, dberror:
if txn:
txn.abort()
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
def ListTableColumns(self, table):
"""Return a list of columns in the given table.
[] if the table doesn't exist.
"""
assert isinstance(table, str)
if contains_metastrings(table):
raise ValueError, "bad table name: contains reserved metastrings"
columnlist_key = _columns_key(table)
if not getattr(self.db, "has_key")(columnlist_key):
return []
pickledcolumnlist = getattr(self.db, "get_bytes",
self.db.get)(columnlist_key)
if pickledcolumnlist:
return pickle.loads(pickledcolumnlist)
else:
return []
def ListTables(self):
"""Return a list of tables in this database."""
pickledtablelist = getattr(self.db, "get_bytes",
self.db.get)(_table_names_key)
if pickledtablelist:
return pickle.loads(pickledtablelist)
else:
return []
def CreateOrExtendTable(self, table, columns):
"""CreateOrExtendTable(table, columns)
Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
all of its current columns.
"""
assert isinstance(columns, list)
try:
self.CreateTable(table, columns)
except TableAlreadyExists:
# the table already existed, add any new columns
txn = None
try:
columnlist_key = _columns_key(table)
txn = self.env.txn_begin()
# load the current column list
oldcolumnlist = pickle.loads(
getattr(self.db, "get_bytes",
self.db.get)(columnlist_key, txn=txn, flags=db.DB_RMW))
# create a hash table for fast lookups of column names in the
# loop below
oldcolumnhash = {}
for c in oldcolumnlist:
oldcolumnhash[c] = c
# create a new column list containing both the old and new
# column names
newcolumnlist = copy.copy(oldcolumnlist)
for c in columns:
if not c in oldcolumnhash:
newcolumnlist.append(c)
# store the table's new extended column list
if newcolumnlist != oldcolumnlist :
# delete the old one first since we opened with DB_DUP
self.db.delete(columnlist_key, txn=txn)
getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
pickle.dumps(newcolumnlist, 1),
txn=txn)
txn.commit()
txn = None
self.__load_column_info(table)
except db.DBError, dberror:
if txn:
txn.abort()
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
def __load_column_info(self, table) :
"""initialize the self.__tablecolumns dict"""
# check the column names
try:
tcolpickles = getattr(self.db, "get_bytes",
self.db.get)(_columns_key(table))
except db.DBNotFoundError:
raise TableDBError, "unknown table: %r" % (table,)
if not tcolpickles:
raise TableDBError, "unknown table: %r" % (table,)
self.__tablecolumns[table] = pickle.loads(tcolpickles)
def __new_rowid(self, table, txn) :
"""Create a new unique row identifier"""
unique = 0
while not unique:
# Generate a random 64-bit row ID string
# (note: might have <64 bits of true randomness
# but it's plenty for our database id needs!)
blist = []
for x in xrange(_rowid_str_len):
blist.append(random.randint(0,255))
newid = struct.pack('B'*_rowid_str_len, *blist)
if sys.version_info[0] >= 3 :
newid = newid.decode("iso8859-1") # 8 bits
# Guarantee uniqueness by adding this key to the database
try:
self.db.put(_rowid_key(table, newid), None, txn=txn,
flags=db.DB_NOOVERWRITE)
except db.DBKeyExistError:
pass
else:
unique = 1
return newid
def Insert(self, table, rowdict) :
"""Insert(table, datadict) - Insert a new row into the table
using the keys+values from rowdict as the column values.
"""
txn = None
try:
if not getattr(self.db, "has_key")(_columns_key(table)):
raise TableDBError, "unknown table"
# check the validity of each column name
if not table in self.__tablecolumns:
self.__load_column_info(table)
for column in rowdict.keys() :
if not self.__tablecolumns[table].count(column):
raise TableDBError, "unknown column: %r" % (column,)
# get a unique row identifier for this row
txn = self.env.txn_begin()
rowid = self.__new_rowid(table, txn=txn)
# insert the row values into the table database
for column, dataitem in rowdict.items():
# store the value
self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
txn.commit()
txn = None
except db.DBError, dberror:
# WIBNI we could just abort the txn and re-raise the exception?
# But no, because TableDBError is not related to DBError via
# inheritance, so it would be backwards incompatible. Do the next
# best thing.
info = sys.exc_info()
if txn:
txn.abort()
self.db.delete(_rowid_key(table, rowid))
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1], info[2]
else :
raise TableDBError, dberror.args[1], info[2]
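    # Example (a sketch) of creating a table and inserting a row with the methods above:
    #
    #   tdb = bsdTableDB('tables.db', '/tmp/dbhome', create=1)
    #   tdb.CreateTable('frobnications', ['colour', 'size', 'owner'])
    #   tdb.Insert('frobnications',
    #              {'colour': 'blue', 'size': 'small', 'owner': 'smith'})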
def Modify(self, table, conditions={}, mappings={}):
"""Modify(table, conditions={}, mappings={}) - Modify items in rows matching 'conditions' using mapping functions in 'mappings'
* table - the table name
* conditions - a dictionary keyed on column names containing
a condition callable expecting the data string as an
argument and returning a boolean.
* mappings - a dictionary keyed on column names containing a
condition callable expecting the data string as an argument and
returning the new string for that column.
"""
try:
matching_rowids = self.__Select(table, [], conditions)
# modify only requested columns
columns = mappings.keys()
for rowid in matching_rowids.keys():
txn = None
try:
for column in columns:
txn = self.env.txn_begin()
# modify the requested column
try:
dataitem = self.db.get(
_data_key(table, column, rowid),
txn=txn)
self.db.delete(
_data_key(table, column, rowid),
txn=txn)
except db.DBNotFoundError:
# XXXXXXX row key somehow didn't exist, assume no
# error
dataitem = None
dataitem = mappings[column](dataitem)
if dataitem is not None:
self.db.put(
_data_key(table, column, rowid),
dataitem, txn=txn)
txn.commit()
txn = None
# catch all exceptions here since we call unknown callables
except:
if txn:
txn.abort()
raise
except db.DBError, dberror:
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
def Delete(self, table, conditions={}):
"""Delete(table, conditions) - Delete items matching the given
conditions from the table.
* conditions - a dictionary keyed on column names containing
condition functions expecting the data string as an
argument and returning a boolean.
"""
try:
matching_rowids = self.__Select(table, [], conditions)
# delete row data from all columns
columns = self.__tablecolumns[table]
for rowid in matching_rowids.keys():
txn = None
try:
txn = self.env.txn_begin()
for column in columns:
# delete the data key
try:
self.db.delete(_data_key(table, column, rowid),
txn=txn)
except db.DBNotFoundError:
# XXXXXXX column may not exist, assume no error
pass
try:
self.db.delete(_rowid_key(table, rowid), txn=txn)
except db.DBNotFoundError:
# XXXXXXX row key somehow didn't exist, assume no error
pass
txn.commit()
txn = None
except db.DBError, dberror:
if txn:
txn.abort()
raise
except db.DBError, dberror:
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
def Select(self, table, columns, conditions={}):
"""Select(table, columns, conditions) - retrieve specific row data
Returns a list of row column->value mapping dictionaries.
* columns - a list of which column data to return. If
columns is None, all columns will be returned.
* conditions - a dictionary keyed on column names
containing callable conditions expecting the data string as an
argument and returning a boolean.
"""
try:
if not table in self.__tablecolumns:
self.__load_column_info(table)
if columns is None:
columns = self.__tablecolumns[table]
matching_rowids = self.__Select(table, columns, conditions)
except db.DBError, dberror:
if sys.version_info < (2, 6) :
raise TableDBError, dberror[1]
else :
raise TableDBError, dberror.args[1]
# return the matches as a list of dictionaries
return matching_rowids.values()
def __Select(self, table, columns, conditions):
"""__Select() - Used to implement Select and Delete (above)
Returns a dictionary keyed on rowids containing dicts
holding the row data for columns listed in the columns param
that match the given conditions.
* conditions is a dictionary keyed on column names
containing callable conditions expecting the data string as an
argument and returning a boolean.
"""
# check the validity of each column name
if not table in self.__tablecolumns:
self.__load_column_info(table)
if columns is None:
            columns = self.__tablecolumns[table]
for column in (columns + conditions.keys()):
if not self.__tablecolumns[table].count(column):
raise TableDBError, "unknown column: %r" % (column,)
        # keyed on rows that match so far, containing dicts keyed on
# column names containing the data for that row and column.
matching_rowids = {}
# keys are rowids that do not match
rejected_rowids = {}
# attempt to sort the conditions in such a way as to minimize full
# column lookups
def cmp_conditions(atuple, btuple):
a = atuple[1]
b = btuple[1]
if type(a) is type(b):
# Needed for python 3. "cmp" vanished in 3.0.1
def cmp(a, b) :
if a==b : return 0
if a<b : return -1
return 1
if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
# longest prefix first
return cmp(len(b.prefix), len(a.prefix))
if isinstance(a, LikeCond) and isinstance(b, LikeCond):
# longest likestr first
return cmp(len(b.likestr), len(a.likestr))
return 0
if isinstance(a, ExactCond):
return -1
if isinstance(b, ExactCond):
return 1
if isinstance(a, PrefixCond):
return -1
if isinstance(b, PrefixCond):
return 1
# leave all unknown condition callables alone as equals
return 0
if sys.version_info < (2, 6) :
conditionlist = conditions.items()
conditionlist.sort(cmp_conditions)
else : # Insertion Sort. Please, improve
conditionlist = []
for i in conditions.items() :
for j, k in enumerate(conditionlist) :
r = cmp_conditions(k, i)
if r == 1 :
conditionlist.insert(j, i)
break
else :
conditionlist.append(i)
# Apply conditions to column data to find what we want
cur = self.db.cursor()
column_num = -1
for column, condition in conditionlist:
column_num = column_num + 1
searchkey = _search_col_data_key(table, column)
# speedup: don't linear search columns within loop
if column in columns:
savethiscolumndata = 1 # save the data for return
else:
savethiscolumndata = 0 # data only used for selection
try:
key, data = cur.set_range(searchkey)
while key[:len(searchkey)] == searchkey:
# extract the rowid from the key
rowid = key[-_rowid_str_len:]
if not rowid in rejected_rowids:
# if no condition was specified or the condition
# succeeds, add row to our match list.
if not condition or condition(data):
if not rowid in matching_rowids:
matching_rowids[rowid] = {}
if savethiscolumndata:
matching_rowids[rowid][column] = data
else:
if rowid in matching_rowids:
del matching_rowids[rowid]
rejected_rowids[rowid] = rowid
key, data = cur.next()
except db.DBError, dberror:
if dberror.args[0] != db.DB_NOTFOUND:
raise
continue
cur.close()
# we're done selecting rows, garbage collect the reject list
del rejected_rowids
# extract any remaining desired column data from the
# database for the matching rows.
if len(columns) > 0:
for rowid, rowdata in matching_rowids.items():
for column in columns:
if column in rowdata:
continue
try:
rowdata[column] = self.db.get(
_data_key(table, column, rowid))
except db.DBError, dberror:
if sys.version_info < (2, 6) :
if dberror[0] != db.DB_NOTFOUND:
raise
else :
if dberror.args[0] != db.DB_NOTFOUND:
raise
rowdata[column] = None
# return the matches
return matching_rowids
def Drop(self, table):
"""Remove an entire table from the database"""
txn = None
try:
txn = self.env.txn_begin()
# delete the column list
self.db.delete(_columns_key(table), txn=txn)
cur = self.db.cursor(txn)
# delete all keys containing this tables column and row info
table_key = _search_all_data_key(table)
while 1:
try:
key, data = cur.set_range(table_key)
except db.DBNotFoundError:
break
# only delete items in this table
if key[:len(table_key)] != table_key:
break
cur.delete()
# delete all rowids used by this table
table_key = _search_rowid_key(table)
while 1:
try:
key, data = cur.set_range(table_key)
except db.DBNotFoundError:
break
# only delete items in this table
if key[:len(table_key)] != table_key:
break
cur.delete()
cur.close()
# delete the tablename from the table name list
tablelist = pickle.loads(
getattr(self.db, "get_bytes", self.db.get)(_table_names_key,
txn=txn, flags=db.DB_RMW))
try:
tablelist.remove(table)
except ValueError:
# hmm, it wasn't there, oh well, that's what we want.
pass
            # delete first, in case we opened with DB_DUP
self.db.delete(_table_names_key, txn=txn)
getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
txn = None
if table in self.__tablecolumns:
del self.__tablecolumns[table]
except db.DBError, dberror:
if txn:
txn.abort()
raise TableDBError(dberror.args[1])
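
# Illustrative usage sketch (an assumption, not part of the original module):
# assuming an instance of the table-database class defined above is available
# as `tdb` (its constructor appears earlier in this file), the public API is
# driven roughly like this:
#
#   tdb.CreateTable('frames', ['filename', 'timestamp'])
#   tdb.Insert('frames', {'filename': 'a.png', 'timestamp': '1'})
#   tdb.Modify('frames',
#              conditions={'filename': lambda v: v.endswith('.png')},
#              mappings={'timestamp': lambda v: '2'})
#   rows = tdb.Select('frames', ['filename', 'timestamp'],
#                     conditions={'timestamp': lambda v: v == '2'})
#   tdb.Delete('frames', conditions={'filename': lambda v: True})
#   tdb.Drop('frames')
#
# Column values are stored and handed to the condition/mapping callables as
# strings, and Select() returns a list of {column: value} dictionaries.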
| 36.710976 | 135 | 0.526227 |
4a228e95e305f0fa5b946dd6701da1bdb29f0c69 | 744 | py | Python | tests/test_holidata.py | xeals/holidata | c99a56b63b1cb9dc5f4f79f3de83ba3865215250 | [
"MIT"
] | null | null | null | tests/test_holidata.py | xeals/holidata | c99a56b63b1cb9dc5f4f79f3de83ba3865215250 | [
"MIT"
] | null | null | null | tests/test_holidata.py | xeals/holidata | c99a56b63b1cb9dc5f4f79f3de83ba3865215250 | [
"MIT"
] | null | null | null | import pytest
from snapshottest.file import FileSnapshot
from snapshottest.formatter import Formatter
from holidata import Locale
from tests import HOLIDATA_YEAR_MAX
@pytest.fixture(params=range(2011, HOLIDATA_YEAR_MAX))
def year(request):
return request.param
@pytest.fixture(params=Locale.plugins)
def locale(request, year):
return request.param(year)
def test_holidata_produces_holidays_for_locale_and_year(snapshot, tmpdir, locale):
temp_file = tmpdir.join('{}.{}.py'.format(locale.locale, locale.year))
export_data = [h.as_dict() for h in locale.holidays]
export_data.sort(key=lambda x: x['date'])
temp_file.write(Formatter().format(export_data, 0))
snapshot.assert_match(FileSnapshot(str(temp_file)))
| 27.555556 | 82 | 0.771505 |
4a22912547c3c22e46af40da3b58ca665bbaa110 | 1,323 | py | Python | test/runtime/frontend_test/chainer_test/functions_test/basic_math_test/neg_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | test/runtime/frontend_test/chainer_test/functions_test/basic_math_test/neg_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | test/runtime/frontend_test/chainer_test/functions_test/basic_math_test/neg_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable
@wrap_template
def template(description=""):
vx = chainer.Variable(np.random.rand(2, 4, 6, 8).astype(np.float32) - 0.5)
vy = -vx
graph = ChainerConverter().convert([vx], [vy])
x = graph.inputs[0]
y = graph.outputs[0]
generate_kernel_test_case(
description=f"[chainer] F.Neg {description}",
graph=graph,
inputs={x: vx.data},
expected={y: vy.data},
)
def test():
template()
def test_with_placeholder():
vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))
vy = -vx
H = Placeholder(label="H")
W = Placeholder(label="W")
px = PlaceholderVariable([1, 3, H, W])
py = -px
graph = ChainerConverter().convert([px], [py])
x = graph.inputs[0]
y = graph.outputs[0]
H.value = 16
W.value = 16
generate_kernel_test_case(
description=f"[chainer] F.Neg with placeholder",
graph=graph,
backend=["webgpu", "webassembly"],
inputs={x: vx.data},
expected={y: vy.data},
)
| 24.054545 | 78 | 0.647014 |
4a2291904fbec9b1da9af24c71ac98a13e9907cf | 1,794 | py | Python | crawler/crawler/spiders/govnews.py | thienanh1999/final-thesis | f2def0eb77caa2124cd7d03a79460afb42098b84 | [
"Apache-2.0"
] | null | null | null | crawler/crawler/spiders/govnews.py | thienanh1999/final-thesis | f2def0eb77caa2124cd7d03a79460afb42098b84 | [
"Apache-2.0"
] | null | null | null | crawler/crawler/spiders/govnews.py | thienanh1999/final-thesis | f2def0eb77caa2124cd7d03a79460afb42098b84 | [
"Apache-2.0"
] | null | null | null | import scrapy
class MohSpider(scrapy.Spider):
name = 'govnews'
allowed_domains = ['ncov.moh.gov.vn']
start_urls = ['https://ncov.moh.gov.vn/vi/web/guest/tin-tuc']
headers = {
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36'
}
def parse(self, response, **kwargs):
for article in response.xpath(
"//div[@class='portlet-body']//a[contains(@href, 'https://ncov.moh.gov.vn/vi/web/guest/-/')]"):
if article.attrib['href'] is not None:
yield response.follow(article.attrib['href'], callback=self.parse_article)
next_page = response.xpath("//a[contains(@href, 'https://ncov.moh.gov.vn/vi/web/guest/tin')]")[-1] \
.attrib['href']
if next_page is not None:
yield response.follow(
url=next_page,
headers=self.headers,
callback=self.parse
)
@staticmethod
def parse_article(response):
title = ''.join(response.css('h5 *::text').getall()).replace('\n', '')
published_time = response.css('span.text-ngayxam-page::text')[0].get().replace('(', '').replace(')', '')
sapo = response.css('strong.text-muted-ncov *::text').get()
paragraphs = response.xpath('//div[@id=$val]//p//text()', val='content-detail').getall()
content = []
for idx in range(len(paragraphs) - 3):
content.append(paragraphs[idx])
author = paragraphs[-3]
source = paragraphs[-1]
yield {
'title': title,
'published_time': published_time,
'sapo': sapo,
'content': content,
'author': author,
'source': source
}
| 37.375 | 129 | 0.557414 |
4a2291ec69787dedc7d7b4a508b03ec663747e47 | 4,042 | py | Python | zipline/finance/transaction.py | magnumjoseph/ziplineaqm | 0b3ac565957b17079602658bbae8a48751956196 | [
"Apache-2.0"
] | 2 | 2019-02-12T02:22:34.000Z | 2019-06-04T22:09:24.000Z | zipline/finance/transaction.py | magnumjoseph/ziplineaqm | 0b3ac565957b17079602658bbae8a48751956196 | [
"Apache-2.0"
] | null | null | null | zipline/finance/transaction.py | magnumjoseph/ziplineaqm | 0b3ac565957b17079602658bbae8a48751956196 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from copy import copy
from zipline.assets import Asset
from zipline.protocol import DATASOURCE_TYPE
from zipline.utils.input_validation import expect_types
class Transaction(object):
@expect_types(asset=Asset)
def __init__(self, asset, amount, dt, price, order_id):
self.asset = asset
self.amount = amount
self.dt = dt
self.price = price
self.order_id = order_id
self.type = DATASOURCE_TYPE.TRANSACTION
def __getitem__(self, name):
return self.__dict__[name]
def __repr__(self):
template = (
"{cls}(asset={asset}, dt={dt},"
" amount={amount}, price={price})"
)
return template.format(
cls=type(self).__name__,
asset=self.asset,
dt=self.dt,
amount=self.amount,
price=self.price
)
def to_dict(self):
py = copy(self.__dict__)
del py['type']
del py['asset']
        # Adding 'sid' for backwards compatibility with downstream consumers.
py['sid'] = self.asset
# If you think this looks dumb, that is because it is! We once stored
# commission here, but haven't for over a year. I don't want to change
# the perf packet structure yet.
py['commission'] = None
return py
def create_transaction(order, dt, price, amount):
# floor the amount to protect against non-whole number orders
# TODO: Investigate whether we can add a robust check in blotter
# and/or tradesimulation, as well.
amount_magnitude = int(abs(amount))
if amount_magnitude < 1:
raise Exception("Transaction magnitude must be at least 1.")
transaction = Transaction(
asset=order.asset,
amount=int(amount),
dt=dt,
price=price,
order_id=order.id
)
return transaction
# CHG [joseph]: return ``commission`` and ``commission_type``
class AQMTransaction(Transaction):
@expect_types(asset=Asset)
def __init__(self, asset, amount, dt, price, order_id, commission, commission_type):
super(AQMTransaction, self).__init__(asset, amount, dt, price, order_id)
self.commission = commission
self.commission_type = commission_type
def __repr__(self):
template = (
"{cls}(asset={asset}, dt={dt},"
" amount={amount}, price={price},"
" commission={commission}, commission_type={commission_type}),"
)
return template.format(
cls=type(self).__name__,
asset=self.asset,
dt=self.dt,
amount=self.amount,
price=self.price,
commission=self.commission,
commission_type=self.commission_type,
)
def create_aqm_transaction(order, dt, price, amount, commission, commission_type):
# floor the amount to protect against non-whole number orders
# TODO: Investigate whether we can add a robust check in blotter
# and/or tradesimulation, as well.
amount_magnitude = int(abs(amount))
if amount_magnitude < 1:
raise Exception("Transaction magnitude must be at least 1.")
transaction = AQMTransaction(
asset=order.asset,
amount=int(amount),
dt=dt,
price=price,
order_id=order.id,
commission=commission,
commission_type=commission_type,
)
return transaction
| 30.390977 | 88 | 0.64473 |
4a2291fab750190e4e6f7c80e7322b5cffba830f | 1,002 | py | Python | Python/Ultrassom.py | rodrigoferrazazevedo/bengalaiot | 3ad68a80db64701b4683cbc3806dcef2e75a3c98 | [
"MIT"
] | null | null | null | Python/Ultrassom.py | rodrigoferrazazevedo/bengalaiot | 3ad68a80db64701b4683cbc3806dcef2e75a3c98 | [
"MIT"
] | null | null | null | Python/Ultrassom.py | rodrigoferrazazevedo/bengalaiot | 3ad68a80db64701b4683cbc3806dcef2e75a3c98 | [
"MIT"
] | null | null | null | import mraa
import time
import math
import pygame
trigger = mraa.Gpio(32)
echo = mraa.Gpio(27)
trigger.dir(mraa.DIR_OUT)
echo.dir(mraa.DIR_IN)
D = 200
speed = 34000
pulse_start = time.time()
pulse_end = time.time()
print "Iniciando leitura ultrassom:"
time.sleep(3)
timeout=time.time()+D
while True:
trigger.write(0)
time.sleep(0.75)
trigger.write(1)
time.sleep(0.0001)
trigger.write(0)
while echo.read()==0:
pulse_start = time.time()
while echo.read()==1:
pulse_end=time.time()
if echo.read()==0:
pulse_duration=pulse_end-pulse_start
distance=pulse_duration*speed/2
distance=round(distance,0)
print "Distancia:",distance,"cm"
if distance < 10:
print "Perto demais"
pygame.init()
pygame.mixer.music.load("ding.ogg")
pygame.mixer.music.play()
if time.time() > timeout:
break
| 20.04 | 47 | 0.57984 |
4a2292c5ce4b59bf5c00917e2403e2949fd2eb49 | 2,449 | py | Python | exportcomments/base.py | exportcomments/exportcomments-python | 5f7f836fb9195f02dbdb74432c879d327aa98029 | [
"MIT"
] | 8 | 2019-11-25T15:45:46.000Z | 2021-12-29T12:03:38.000Z | exportcomments/base.py | exportcomments/exportcomments-python | 5f7f836fb9195f02dbdb74432c879d327aa98029 | [
"MIT"
] | 1 | 2020-11-02T00:25:10.000Z | 2020-11-02T07:42:26.000Z | exportcomments/base.py | exportcomments/exportcomments-python | 5f7f836fb9195f02dbdb74432c879d327aa98029 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from exportcomments.settings import DEFAULT_BASE_URL
import json
import pkg_resources
import requests
import six
from six.moves.urllib.parse import urlencode
import time
try:
version = pkg_resources.get_distribution('exportcomments').version
except Exception:
version = 'noversion'
class ModelEndpointSet(object):
def __init__(self, token, base_url=DEFAULT_BASE_URL):
self.token = token
self.base_url = base_url
def _add_action_or_query_string(self, url, action, query_string):
if action is not None:
url += '{}/'.format(action)
if query_string is not None:
url += '?' + urlencode(query_string)
return url
def get_list_url(self, action=None, query_string=None):
url = '{}/exports/me'.format(self.base_url)
return self._add_action_or_query_string(url, action, query_string)
def get_detail_url(self, query_string=None):
url = '{}/export'.format(self.base_url)
return self._add_action_or_query_string(url, None, query_string)
def make_request(self, method, url, data=None, retry_if_throttled=True, params=None):
if data is not None:
data = json.dumps(data)
retries_left = 3
while retries_left:
response = requests.request(method, url, data=data, params=params, headers={
'X-AUTH-TOKEN': self.token,
'Content-Type': 'application/json',
'User-Agent': 'python-sdk-{}'.format(version),
})
if response.content:
body = response.json()
if retry_if_throttled and response.status_code == 429:
error_code = body.get('error_code')
wait = None
if error_code in ('PLAN_RATE_LIMIT', 'CONCURRENCY_RATE_LIMIT'):
wait = int(body.get('seconds_to_wait', 2))
if wait:
time.sleep(wait)
retries_left -= 1
continue
return response
return response
def remove_none_value(self, d):
return {k: v for k, v in six.iteritems(d) if v is not None}
| 33.547945 | 89 | 0.602287 |
4a22939b33361c5bb0b1555b1fa2349604714829 | 16 | py | Python | python/ql/src/Imports/ImportTwiceOnALine.py | vadi2/codeql | a806a4f08696d241ab295a286999251b56a6860c | [
"MIT"
] | 4,036 | 2020-04-29T00:09:57.000Z | 2022-03-31T14:16:38.000Z | python/ql/src/Imports/ImportTwiceOnALine.py | vadi2/codeql | a806a4f08696d241ab295a286999251b56a6860c | [
"MIT"
] | 2,970 | 2020-04-28T17:24:18.000Z | 2022-03-31T22:40:46.000Z | python/ql/src/Imports/ImportTwiceOnALine.py | ScriptBox99/github-codeql | 2ecf0d3264db8fb4904b2056964da469372a235c | [
"MIT"
] | 794 | 2020-04-29T00:28:25.000Z | 2022-03-30T08:21:46.000Z | import xxx, yyy
| 8 | 15 | 0.75 |
4a2293dce462e78de5b5a0f8e1826a9fd749c647 | 5,148 | py | Python | py/gps_building_blocks/airflow/utils/retry_utils.py | isabella232/gps_building_blocks | 86ef8be60a42cd12e27696007589388b7b053f4f | [
"Apache-2.0"
] | 30 | 2020-03-13T09:56:37.000Z | 2022-02-16T01:47:29.000Z | py/gps_building_blocks/airflow/utils/retry_utils.py | google/gps_building_blocks | 385ea06f3e84047e08e120791281aac02f028a81 | [
"Apache-2.0"
] | 32 | 2020-04-16T14:28:37.000Z | 2022-02-10T03:36:33.000Z | py/gps_building_blocks/airflow/utils/retry_utils.py | isabella232/gps_building_blocks | 86ef8be60a42cd12e27696007589388b7b053f4f | [
"Apache-2.0"
] | 13 | 2020-04-15T08:10:51.000Z | 2021-10-04T23:49:46.000Z | # python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility file for all retry related utils.
All retry consts and retry wrapper should be in this file for
consistent use across the component.
Current implementation uses this retry library:
https://github.com/jd/tenacity/tree/master/tenacity
TODO(): Find a better way to represent retriable and non-retriable http
status codes.
Example usage:
@logged_retry_on_retriable_http_error
def function_to_retry_on_retriable_http_error():
pass
"""
import functools
import logging
from typing import Callable, TypeVar
from airflow import exceptions
from googleapiclient import errors
import tenacity
_RT = TypeVar('_RT') # General return variable
_RETRY_UTILS_MAX_RETRIES = 5
_RETRY_UTILS_RETRIABLE_STATUS_CODES = (429, # Too Many Requests
500, # Internal Server Error
503) # Service Unavailable
_LOGGER = logging.getLogger(__name__)
def _is_retriable_http_error(error: errors.HttpError) -> bool:
"""Checks if HttpError is in _RETRY_UTILS_RETRIABLE_STATUS_CODES.
This function requires HttpError to have a valid response.
Args:
error: The http error to check.
Returns:
True if HttpError is retriable, otherwise False.
"""
if ('resp' in error.__dict__ and
error.__dict__['resp'].status in _RETRY_UTILS_RETRIABLE_STATUS_CODES):
return True
return False
def _is_retriable_http_airflow_exception(
error: exceptions.AirflowException) -> bool:
"""Checks if AirflowException is raised by an http error in specific codes.
Such AirflowException is thrown in airflow.hooks.http_hook.HttpHook. This
function requires AirflowException to have an error message with specified
format. The format is defined in
airflow.hooks.http_hook.HttpHook.check_response, which is
{response.status_code}:{response.reason}. The retriable status codes is
defined in _RETRY_UTILS_RETRIABLE_STATUS_CODES.
Args:
error: The airflow exception to check.
Returns:
True if AirflowException is raised by a retriable http error, otherwise
False.
"""
text = str(error)
status_code = text.split(':')[0]
try:
if int(status_code) in _RETRY_UTILS_RETRIABLE_STATUS_CODES:
return True
return False
except ValueError:
return False
def logged_retry_on_retriable_exception(
function: Callable[..., _RT],
is_retriable: Callable[..., bool]) -> Callable[..., _RT]:
"""Applies a decorator for retrying a function on retriable exception.
Wraps a retry decorator for common parameters setting across the component.
The returned decorator will retry the decorated function should it raise a
retriable error. The function is_retriable determines whether an error is
retriable or not.
  The decorated function will be retried up to _RETRY_UTILS_MAX_RETRIES times,
  with an exponential backoff (tenacity's wait_exponential) between retries,
  capped at _RETRY_UTILS_MAX_RETRIES seconds.
Should the maximum retry number be reached, only the error raised during the
last retry will be raised.
Each retry will be logged to _LOGGER.
Args:
function: The function to decorate.
is_retriable: The function to determine whether an error is retriable or
not.
Returns:
Decorated function.
"""
@functools.wraps(function)
def decorated_function(*args, **kwargs) -> _RT:
return tenacity.retry(
retry=tenacity.retry_if_exception(is_retriable),
stop=tenacity.stop.stop_after_attempt(_RETRY_UTILS_MAX_RETRIES),
wait=tenacity.wait.wait_exponential(max=_RETRY_UTILS_MAX_RETRIES),
after=tenacity.after.after_log(_LOGGER, logging.DEBUG),
reraise=True
)(function)(*args, **kwargs)
return decorated_function
def logged_retry_on_retriable_http_error(function: Callable[..., _RT]
) -> Callable[..., _RT]:
"""Applies a decorator for retrying a function on retriable http error.
Args:
function: The function to decorate.
Returns:
Decorated function.
"""
return logged_retry_on_retriable_exception(function, _is_retriable_http_error)
def logged_retry_on_retriable_http_airflow_exception(
function: Callable[..., _RT]) -> Callable[..., _RT]:
"""Applies a decorator for retrying a function on airflow exception raised by retriable http error.
Args:
function: The function to decorate.
Returns:
Decorated function.
"""
return logged_retry_on_retriable_exception(
function, _is_retriable_http_airflow_exception)
| 31.2 | 101 | 0.741841 |
4a2294242a9699f88983d21d517e5ba2803d3c9e | 4,625 | py | Python | librtmp/amf.py | yn295636/python-librtmp | 2bfdc4238e6247c4abd0745f8c7abe43543f0b1c | [
"BSD-2-Clause"
] | 128 | 2015-02-11T15:09:25.000Z | 2022-03-28T03:31:41.000Z | librtmp/amf.py | yn295636/python-librtmp | 2bfdc4238e6247c4abd0745f8c7abe43543f0b1c | [
"BSD-2-Clause"
] | 27 | 2015-01-07T14:06:42.000Z | 2021-11-28T09:00:38.000Z | librtmp/amf.py | yn295636/python-librtmp | 2bfdc4238e6247c4abd0745f8c7abe43543f0b1c | [
"BSD-2-Clause"
] | 40 | 2015-02-11T21:13:41.000Z | 2021-12-12T22:31:01.000Z | try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
from . import ffi, librtmp
from .aval import AVal
from .compat import is_py2, bytes, range
from .exceptions import AMFError
AMF_STRING_TYPES = (librtmp.AMF_STRING, librtmp.AMF_LONG_STRING)
AMF_OBJECT_DICT_TYPES = (librtmp.AMF_OBJECT, librtmp.AMF_ECMA_ARRAY)
__all__ = ["AMFObject", "decode_amf", "encode_amf"]
class AMFObject(dict):
pass
def _create_buffer(size):
pbuf = ffi.new("char[]", size)
pend = pbuf + size
buf = ffi.buffer(pbuf, size)
return pbuf, pend, buf
def _encode_key_name(key):
key = bytes(key, "utf8")
key_len = len(key)
pbuf, pend, buf = _create_buffer(key_len + 2)
librtmp.AMF_EncodeInt16(pbuf, pend, key_len)
buf[2:key_len + 2] = key
return buf[:]
@singledispatch
def encoder(val):
raise AMFError("Unable to encode '{0}' type".format(type(val).__name__))
@encoder.register(type(None))
def _encode_none(val):
return bytes((librtmp.AMF_NULL,))
@encoder.register(str)
def _encode_str(val):
val = AVal(val)
pbuf, pend, buf = _create_buffer(val.aval.av_len + 1 + 4)
res = librtmp.AMF_EncodeString(pbuf, pend, val.aval)
size = res - pbuf
return buf[:size]
if is_py2:
encoder.register(unicode, _encode_str)
@encoder.register(float)
@encoder.register(int)
def _encode_number(val):
val = float(val)
pbuf, pend, buf = _create_buffer(9)
librtmp.AMF_EncodeNumber(pbuf, pend, val)
return buf[:]
if is_py2:
encoder.register(long, _encode_number)
@encoder.register(bool)
def _encode_boolean(val):
pbuf, pend, buf = _create_buffer(2)
librtmp.AMF_EncodeBoolean(pbuf, pend, int(val))
return buf[:]
@encoder.register(AMFObject)
def _encode_object(val):
phead, headend, head = _create_buffer(4)
head[0] = bytes((librtmp.AMF_OBJECT,))
librtmp.AMF_EncodeInt24(phead + 1, headend, librtmp.AMF_OBJECT_END)
body = bytearray()
for key, value in val.items():
body += _encode_key_name(key)
body += encoder(value)
return head[:1] + bytes(body) + head[1:]
@encoder.register(dict)
def _encode_ecma_array(val):
phead, headend, head = _create_buffer(8)
head[0] = bytes((librtmp.AMF_ECMA_ARRAY,))
librtmp.AMF_EncodeInt32(phead + 1, headend, len(val))
librtmp.AMF_EncodeInt24(phead + 5, headend, librtmp.AMF_OBJECT_END)
body = bytearray()
for key, value in val.items():
body += _encode_key_name(key)
body += encoder(value)
return head[:5] + bytes(body) + head[5:]
@encoder.register(list)
def _encode_array(val):
phead, headend, head = _create_buffer(5)
head[0] = bytes((librtmp.AMF_STRICT_ARRAY,))
librtmp.AMF_EncodeInt32(phead + 1, headend, len(val))
body = bytearray()
for value in val:
body += encoder(value)
return head[:] + bytes(body)
def _decode_prop(prop):
prop_type = librtmp.AMFProp_GetType(prop)
if prop_type == librtmp.AMF_NUMBER:
val = librtmp.AMFProp_GetNumber(prop)
elif prop_type in AMF_STRING_TYPES:
aval = AVal()
librtmp.AMFProp_GetString(prop, aval.aval)
val = aval.value.decode("utf8", "ignore")
elif prop_type == librtmp.AMF_BOOLEAN:
val = bool(librtmp.AMFProp_GetBoolean(prop))
elif prop_type in AMF_OBJECT_DICT_TYPES:
if prop_type == librtmp.AMF_OBJECT:
val = AMFObject()
else:
val = dict()
for key, value in _decode_prop_obj(prop):
val[key] = value
elif prop_type == librtmp.AMF_STRICT_ARRAY:
val = []
for key, value in _decode_prop_obj(prop):
val.append(value)
else:
val = None
return val
def _decode_prop_obj(prop):
obj = ffi.new("AMFObject*")
librtmp.AMFProp_GetObject(prop, obj)
prop_count = librtmp.AMF_CountProp(obj)
for i in range(prop_count):
prop = librtmp.AMF_GetProp(obj, ffi.NULL, i)
key = AVal()
librtmp.AMFProp_GetName(prop, key.aval)
key = key.value.decode("utf8", "ignore")
value = _decode_prop(prop)
yield key, value
def encode_amf(value):
return encoder(value)
def decode_amf(body):
obj = ffi.new("AMFObject*")
res = librtmp.AMF_Decode(obj, body, len(body), 0)
if res == ffi.NULL:
raise AMFError("Unable to decode AMF data")
rval = []
prop_count = librtmp.AMF_CountProp(obj)
for i in range(prop_count):
prop = librtmp.AMF_GetProp(obj, ffi.NULL, i)
val = _decode_prop(prop)
rval.append(val)
return rval
| 23.241206 | 76 | 0.659027 |
4a229436ff16720b0de5b2adc3bc3f37a90662dd | 4,136 | py | Python | Bio/SeqIO/IgIO.py | bioinf-mcb/biopython | 1a1f4a7ee4e0efba517d3d607c56c27e72e399cc | [
"BSD-3-Clause"
] | null | null | null | Bio/SeqIO/IgIO.py | bioinf-mcb/biopython | 1a1f4a7ee4e0efba517d3d607c56c27e72e399cc | [
"BSD-3-Clause"
] | null | null | null | Bio/SeqIO/IgIO.py | bioinf-mcb/biopython | 1a1f4a7ee4e0efba517d3d607c56c27e72e399cc | [
"BSD-3-Clause"
] | 1 | 2020-03-19T17:06:16.000Z | 2020-03-19T17:06:16.000Z | # Copyright 2008-2015 by Peter Cock. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.SeqIO support for the "ig" (IntelliGenetics or MASE) file format.
This module is for reading and writing IntelliGenetics format files as
SeqRecord objects. This file format appears to be the same as the MASE
multiple sequence alignment format.
You are expected to use this module via the Bio.SeqIO functions.
"""
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def _parse(handle, alphabet):
# Skip any file header text before the first record (;; lines)
for line in handle:
if not line.startswith(";;"):
break
else:
# Empty file, or header only
return
if line[0] != ";":
raise ValueError("Records should start with ';' and not:\n%r" % line)
while line:
# Now iterate over the records
# Try and agree with SeqRecord convention from the GenBank parser,
# (and followed in the SwissProt parser) which stores the comments
# as a long string with newlines under annotations key 'comment'.
# Note some examples use "; ..." and others ";..."
comment_lines = []
while line.startswith(";"):
# TODO - Extract identifier from lines like "LOCUS\tB_SF2"?
comment_lines.append(line[1:].strip())
line = next(handle)
title = line.rstrip()
seq_lines = []
for line in handle:
if line[0] == ";":
break
# Remove trailing whitespace, and any internal spaces
seq_lines.append(line.rstrip().replace(" ", ""))
else:
line = None
seq_str = "".join(seq_lines)
if seq_str.endswith("1"):
# Remove the optional terminator (digit one)
seq_str = seq_str[:-1]
if "1" in seq_str:
raise ValueError("Potential terminator digit one found within sequence.")
# Return the record and then continue...
record = SeqRecord(Seq(seq_str, alphabet), id=title, name=title)
record.annotations["comment"] = "\n".join(comment_lines)
yield record
# We should be at the end of the file now
assert not line
def IgIterator(source, alphabet=single_letter_alphabet):
"""Iterate over IntelliGenetics records (as SeqRecord objects).
source - file-like object opened in text mode, or a path to a file
alphabet - optional alphabet
The optional free format file header lines (which start with two
semi-colons) are ignored.
The free format commentary lines at the start of each record (which
start with a semi-colon) are recorded as a single string with embedded
new line characters in the SeqRecord's annotations dictionary under the
key 'comment'.
Examples
--------
>>> with open("IntelliGenetics/TAT_mase_nuc.txt") as handle:
... for record in IgIterator(handle):
... print("%s length %i" % (record.id, len(record)))
...
A_U455 length 303
B_HXB2R length 306
C_UG268A length 267
D_ELI length 309
F_BZ163A length 309
O_ANT70 length 342
O_MVP5180 length 348
CPZGAB length 309
CPZANT length 309
A_ROD length 390
B_EHOA length 420
D_MM251 length 390
STM_STM length 387
VER_AGM3 length 354
GRI_AGM677 length 264
SAB_SAB1C length 219
SYK_SYK length 330
"""
try:
handle = open(source)
except TypeError:
handle = source
if handle.read(0) != "":
raise ValueError(
"IntelliGenetics files must be opened in text mode."
) from None
try:
yield from _parse(handle, alphabet)
finally:
if handle is not source:
handle.close()
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest(verbose=0)
| 31.333333 | 85 | 0.642892 |
4a2294c64152cf6107175b8ad3ae9351af9474d5 | 3,667 | py | Python | edabit/hard/first_repeating_character/first_repeating_character.py | ticotheps/practice_problems | 943c5ab9eebeac4e5cf162adbdc681119603dc36 | [
"MIT"
] | null | null | null | edabit/hard/first_repeating_character/first_repeating_character.py | ticotheps/practice_problems | 943c5ab9eebeac4e5cf162adbdc681119603dc36 | [
"MIT"
] | null | null | null | edabit/hard/first_repeating_character/first_repeating_character.py | ticotheps/practice_problems | 943c5ab9eebeac4e5cf162adbdc681119603dc36 | [
"MIT"
] | null | null | null | """
FIND FIRST CHARACTER THAT REPEATS
Create a function that takes a string and returns the first character that
repeats. If there is no repeat of a character, then return '-1' (string).
Examples:
- first_repeat('legolas') -> 'l'
- first_repeat('Gandalf') -> 'a'
- first_repeat('Balrog') -> '-1'
- first_repeat('Isildur') -> '-1' // Case-sensitive 'I' != 'i'
Notes:
- Tests are case-sensitive.
"""
"""
U.P.E.R. Problem-Solving Framework
PHASE I: UNDERSTAND
Objective:
- Write an algorithm that takes in a single input (of string data type) and
returns a single output (of string or integer data type).
Expected Input(s):
- Number Of: 1
- Data Type: string
- Var Name: 'input_str'
Expected Output(s):
- Number Of: 1
- Data Type: string
- Var Name: 'repeating_char'
Constraints:
- Tests are case-sensitive.
- i.e. - "I" != "i"
PHASE II: PLAN
Brute Force Solution (nested 'for' loops):
(1) Define a function that takes in a single input string argument and
returns a single output string or integer depending on the existence of
repeating characters.
(2) Declare a var, 'repeating_char', that will hold the first character that is found to repeat itself in the input string. Initialize this var with a value of None.
(3) Use an outer 'for' loop to iterate through each character in the input string. This will provide access to each letter of the input string for comparison purposes.
(4) Nest an inner 'for' loop inside of the outer 'for' loop to iterate through each of the same characters in the input string for a second time. This second iteration will enable each character to be compared to itself and to each of the other characters.
(5) Inside of the inner 'for' loop, evaluate whether or not the iterated element, 'j'(of the inner 'for' loop), is a repeating character of the iterated element, 'i' (of the outer 'for' loop).
(a) If it is a repeating character, set the value of 'repeating_char' equal to 'i'.
(b) If it is NOT a repeating character, do nothing.
(6) If no repeating characters were found, set the value of 'repeating_char' to '-1'.
(7) Return the value of 'repeating_char'.
PHASE III: EXECUTE (Please see below)
PHASE IV: REFLECT/REFACTOR
Brute Force Solution:
- Asymptotic Analysis:
- Time Complexity: O(n^2) -> 'quadratic'
- Space Complexity: O(1) -> 'constant'
- Could we improve the time or space complexity of this solution?
- Yes. We could cache characters in a python dictionary by ensuring that a key:value pair exists in the dictionary for each character. The lookup for dictionaries is O(1) time complexity.
        - Please see the first_repeat_optimized() solution below.
Optimized Solution:
- Asymptotic Analysis:
- Time Complexity: O(n) -> 'linear'
- Space Complexity: O(n) -> 'linear'
"""
def first_repeat(chars):
repeating_char = None
for i in range(0, len(chars)):
for j in range(0, len(chars)):
if i != j:
if chars[i] == chars[j]:
repeating_char = chars[j]
return repeating_char
repeating_char = '-1'
return repeating_char
def first_repeat_optimized(chars):
cache = {}
repeating_char = None
for i in range(0, len(chars)):
if chars[i] not in cache:
cache[chars[i]] = True
else:
repeating_char = chars[i]
return repeating_char
repeating_char = '-1'
return repeating_char | 34.59434 | 260 | 0.647123 |
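
# Minimal self-check (illustrative, not part of the original solution): the
# example cases quoted in the problem statement at the top of this file.
if __name__ == "__main__":
    for func in (first_repeat, first_repeat_optimized):
        assert func('legolas') == 'l'
        assert func('Gandalf') == 'a'
        assert func('Balrog') == '-1'
        assert func('Isildur') == '-1'  # case-sensitive: 'I' != 'i'
    print("All example cases pass.")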
4a22954588bc2b02eab5b725a75f81321b094d9a | 3,347 | py | Python | examples/Radar_Server_Level_3.py | DanielWatkins/siphon | 4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2 | [
"BSD-3-Clause"
] | 164 | 2015-05-25T22:28:38.000Z | 2022-02-07T11:09:16.000Z | examples/Radar_Server_Level_3.py | DanielWatkins/siphon | 4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2 | [
"BSD-3-Clause"
] | 369 | 2015-05-20T17:03:38.000Z | 2022-03-24T15:29:59.000Z | examples/Radar_Server_Level_3.py | DanielWatkins/siphon | 4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2 | [
"BSD-3-Clause"
] | 79 | 2015-05-15T19:31:34.000Z | 2021-06-27T20:34:38.000Z | # Copyright (c) 2013-2015 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
=======================
TDS Radar Query Service
=======================
Use Siphon to get NEXRAD Level 3 data from a TDS.
"""
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from siphon.cdmr import Dataset
from siphon.radarserver import get_radarserver_datasets, RadarServer
###########################################
# First, point to the top-level thredds radar server accessor to find what datasets are
# available.
ds = get_radarserver_datasets('http://thredds.ucar.edu/thredds/')
print(list(ds))
###########################################
# Now create an instance of RadarServer to point to the appropriate
# radar server access URL. This is pulled from the catalog reference url.
url = ds['NEXRAD Level III Radar from IDD'].follow().catalog_url
rs = RadarServer(url)
###########################################
# Look at the variables available in this dataset
print(rs.variables)
###########################################
# Create a new query object to help request the data. Using the chaining
# methods, ask for data from radar FTG (Denver) at the current time for the product
# N0Q, which is reflectivity data for the lowest tilt. We see that when the query
# is represented as a string, it shows the encoded URL.
query = rs.query()
query.stations('FTG').time(datetime.utcnow()).variables('N0Q')
###########################################
# We can use the RadarServer instance to check our query, to make
# sure we have required parameters and that we have chosen valid
# station(s) and variable(s)
rs.validate_query(query)
###########################################
# Make the request, which returns an instance of TDSCatalog. This
# handles parsing the catalog
catalog = rs.get_catalog(query)
###########################################
# We can look at the datasets on the catalog to see what data we found by the query. We
# find one NIDS file in the return.
print(catalog.datasets)
###########################################
# We can pull that dataset out of the dictionary and look at the available access URLs.
# We see URLs for OPeNDAP, CDMRemote, and HTTPServer (direct download).
ds = list(catalog.datasets.values())[0]
print(ds.access_urls)
###########################################
# We'll use the CDMRemote reader in Siphon and pass it the appropriate access URL.
data = Dataset(ds.access_urls['CdmRemote'])
###########################################
# The CDMRemote reader provides an interface that is almost identical to the usual python
# NetCDF interface. We pull out the variables we need for azimuth and range, as well as
# the data itself.
rng = data.variables['gate'][:] / 1000.
az = data.variables['azimuth'][:]
ref = data.variables['BaseReflectivityDR'][:]
###########################################
# Then convert the polar coordinates to Cartesian
x = rng * np.sin(np.deg2rad(az))[:, None]
y = rng * np.cos(np.deg2rad(az))[:, None]
ref = np.ma.array(ref, mask=np.isnan(ref))
###########################################
# Finally, we plot them up using matplotlib.
fig, ax = plt.subplots(1, 1, figsize=(9, 8))
ax.pcolormesh(x, y, ref)
ax.set_aspect('equal', 'datalim')
ax.set_xlim(-460, 460)
ax.set_ylim(-460, 460)
| 37.188889 | 89 | 0.623842 |
4a22955fb7ecb2b393cf9a8bba188c0c2af4cec6 | 5,510 | py | Python | 1.9.4-479/benchmark.py | shaoziyang/micropython_benchmarks | adb2d218d76e725156c3a3752e642ddc627973d7 | [
"MIT"
] | 11 | 2018-11-26T13:00:38.000Z | 2021-06-03T10:26:12.000Z | 1.9.4-479/benchmark.py | shaoziyang/micropython_benchmarks | adb2d218d76e725156c3a3752e642ddc627973d7 | [
"MIT"
] | null | null | null | 1.9.4-479/benchmark.py | shaoziyang/micropython_benchmarks | adb2d218d76e725156c3a3752e642ddc627973d7 | [
"MIT"
] | 6 | 2018-09-16T04:15:05.000Z | 2022-01-11T00:34:03.000Z | '''
File: benchmarks.py
Descript: benchmark test for different microcontroller
Author: Shao ziyang
Data: 2018-Octo-25
Version: 1.0
'''
import time
import machine
import gc
def pi(places=100):
# 3 + 3*(1/24) + 3*(1/24)*(9/80) + 3*(1/24)*(9/80)*(25/168)
# The numerators 1, 9, 25, ... are given by (2x + 1) ^ 2
# The denominators 24, 80, 168 are given by (16x^2 -24x + 8)
extra = 8
one = 10 ** (places+extra)
t, c, n, na, d, da = 3*one, 3*one, 1, 0, 0, 24
while t > 1:
n, na, d, da = n+na, na+8, d+da, da+32
t = t * n // d
c += t
return c // (10 ** extra)
def pi_test(n = 5000):
t1 = time.ticks_ms()
t = pi(n)
t2 = time.ticks_ms()
r = time.ticks_diff(t2, t1)/1000
print(' Pi', n, 'digit calculation: ', r, 's')
return '%.2f'%r
def int_add_test(n = 1000000, a = 12345, b = 56789):
t1 = time.ticks_ms()
sum = 0
for i in range(n):
sum = a + b
t2 = time.ticks_ms()
r = time.ticks_diff(t2, t1)/1000
print(' Integer Add test', n, 'times: ', r, 's')
return '%.2f'%r
def float_add_test(n=1000000, a = 1234.5678, b = 5678.1234):
t1 = time.ticks_ms()
sum = 0
for i in range(n):
sum = a + b
t2 = time.ticks_ms()
r = time.ticks_diff(t2, t1)/1000
print(' Float Add test', n, 'times:', r, 's')
return '%.2f'%r
def int_mul_test(n=1000000, a = 12345, b = 56789):
t1 = time.ticks_ms()
sum = 0
for i in range(n):
sum = a * b
t2 = time.ticks_ms()
r = time.ticks_diff(t2, t1)/1000
print(' Integer Mul test', n, 'times: ', r, 's')
return '%.2f'%r
def float_mul_test(n=1000000, a = 1234.5678, b = 5678.1234):
t1 = time.ticks_ms()
sum = 0
for i in range(n):
sum = a * b
t2 = time.ticks_ms()
r = time.ticks_diff(t2, t1)/1000
print(' Float Mul test', n, 'times: ', r, 's')
return '%.2f'%r
def int_div_test(n=1000000, a = 123456, b = 567):
t1 = time.ticks_ms()
sum = 0
for i in range(n):
sum = a // b
t2 = time.ticks_ms()
r = time.ticks_diff(t2, t1)/1000
print(' Integer Div test', n, 'times: ', r, 's')
return '%.2f'%r
def float_div_test(n=1000000, a = 12345.678, b = 56.789):
t1 = time.ticks_ms()
sum = 0
for i in range(n):
sum = a / b
t2 = time.ticks_ms()
r = time.ticks_diff(t2, t1)/1000
print(' Float Div test', n, 'times: ', r, 's')
return '%.2f'%r
def mem():
r = gc.mem_free()
print('free memory:', r)
print('Speed test')
try:
print('System freq: {:.1f} MHz'.format(machine.freq()[0]/1000000))
except:
print('System freq: {:.1f} MHz'.format(machine.freq()/1000000))
print('\nCalculate integer addition')
gc.collect()
mem()
d1 = int_add_test()
d2 = int_add_test()
d3 = int_add_test()
r_int_add = min(d1, d2, d3, key=float)
print('Integer addition test result: ', r_int_add, 's')
mem()
print('\nCalculate float addition')
gc.collect()
mem()
d1 = float_add_test()
d2 = float_add_test()
d3 = float_add_test()
r_float_add = min(d1, d2, d3, key=float)
print('Float addition test result: ', r_float_add, 's')
mem()
print('\nCalculate integer multiplication')
gc.collect()
mem()
d1 = int_mul_test()
d2 = int_mul_test()
d3 = int_mul_test()
r_int_mul = min(d1, d2, d3, key=float)
print('Integer multiplication test result: ', r_int_mul, 's')
mem()
print('\nCalculate float multiplication')
gc.collect()
mem()
d1 = float_mul_test()
d2 = float_mul_test()
d3 = float_mul_test()
r_float_mul = min(d1, d2, d3, key=float)
print('Float multiplication test result: ', r_float_mul, 's')
mem()
print('\nCalculate integer division')
gc.collect()
mem()
d1 = int_div_test()
d2 = int_div_test()
d3 = int_div_test()
r_int_div = min(d1, d2, d3, key=float)
print('Integer division test result: ', r_int_div, 's')
mem()
print('\nCalculate float division')
gc.collect()
mem()
d1 = float_div_test()
d2 = float_div_test()
d3 = float_div_test()
r_float_div = min(d1, d2, d3, key=float)
print('Float division test result: ', r_float_div, 's')
mem()
print('\nCalculate 1000 digit Pi')
gc.collect()
mem()
try:
d1 = pi_test(1000)
d2 = pi_test(1000)
d3 = pi_test(1000)
    r_pi_1000 = min(d1, d2, d3, key=float)
print('1000 digit Pi calculation result: ', r_pi_1000, 's')
mem()
except:
r_pi_1000 = None
print(' calculation error')
print('\nCalculate 5000 digit Pi')
gc.collect()
mem()
try:
d1 = pi_test(5000)
d2 = pi_test(5000)
d3 = pi_test(5000)
    r_pi_5000 = min(d1, d2, d3, key=float)
print('5000 digit Pi calculation result: ', r_pi_5000, 's')
mem()
except:
r_pi_5000 = None
print(' calculation error')
print('\nCalculate 100,000 digit Pi')
gc.collect()
mem()
try:
d1 = pi_test(100000)
d2 = pi_test(100000)
d3 = pi_test(100000)
    r_pi_100000 = min(d1, d2, d3, key=float)
print('100000 digit Pi calculation result: ', r_pi_100000, 's')
mem()
except:
r_pi_100000 = None
print(' calculation error')
print('Test result:')
print(' Integer addition test result: ', r_int_add, 's')
print(' Float addition test result: ', r_float_add, 's')
print(' Integer multiplication test result: ', r_int_mul, 's')
print(' Float multiplication test result: ', r_float_mul, 's')
print(' Integer division test result: ', r_int_div, 's')
print(' Float division test result: ', r_float_div, 's')
if r_pi_1000:
print(' 1000 digit Pi calculation result: ', r_pi_1000, 's')
if r_pi_5000:
print(' 5000 digit Pi calculation result: ', r_pi_5000, 's')
if r_pi_100000:
print(' 100000 digit Pi calculation result: ', r_pi_100000, 's')
| 25.159817 | 70 | 0.613067 |
4a2295ed8a033613cd950d94a8f1b7904fd6b369 | 29 | py | Python | pyson_connect/__init__.py | clipper-programing/pyson_connect | 103f74d62b43c093555988bca58b6e1510f99341 | [
"BSD-3-Clause"
] | 2 | 2022-01-04T01:57:34.000Z | 2022-02-06T22:00:37.000Z | pyson_connect/__init__.py | clipper-programing/pyson_connect | 103f74d62b43c093555988bca58b6e1510f99341 | [
"BSD-3-Clause"
] | null | null | null | pyson_connect/__init__.py | clipper-programing/pyson_connect | 103f74d62b43c093555988bca58b6e1510f99341 | [
"BSD-3-Clause"
] | null | null | null | from .pyson_connect import *
| 14.5 | 28 | 0.793103 |
4a22974b26e80050b2e0b32ee5cbd0ca3d231352 | 9,929 | py | Python | GANs/tecogan/train.py | shikisawamura/nnabla-examples | baf4e4cc620dedbf4368683325c0fb868676850d | [
"Apache-2.0"
] | null | null | null | GANs/tecogan/train.py | shikisawamura/nnabla-examples | baf4e4cc620dedbf4368683325c0fb868676850d | [
"Apache-2.0"
] | null | null | null | GANs/tecogan/train.py | shikisawamura/nnabla-examples | baf4e4cc620dedbf4368683325c0fb868676850d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.solvers as S
from nnabla.ext_utils import get_extension_context
from nnabla.monitor import Monitor
from tecogan_model import get_tecogan_model, get_frvsr_model, get_common_monitors, get_tecogan_monitors
from utils import CommunicatorWrapper, save_checkpoint, load_checkpoint
from utils.utils import ExponentialMovingAverage
from args import get_config
def main():
conf = get_config()
extension_module = conf.nnabla_context.context
ctx = get_extension_context(
extension_module, device_id=conf.nnabla_context.device_id)
comm = CommunicatorWrapper(ctx)
nn.set_default_context(comm.ctx)
print("comm rank", comm.rank)
# data iterators for train and val data
from data_loader import data_iterator_sr, get_sample_name_grid, nn_data_gauss_down_quad
sample_names = get_sample_name_grid(conf)
num_samples = len(sample_names[0])
print("No of training samples :", num_samples)
tar_size = conf.train.crop_size
tar_size = (conf.train.crop_size * 4) + int(1.5 * 3.0) * \
2 # crop_size * 4, and Gaussian blur margin
data_iterator_train = data_iterator_sr(
conf, num_samples, sample_names, tar_size, shuffle=True)
if comm.n_procs > 1:
data_iterator_train = data_iterator_train.slice(
rng=None, num_of_slices=comm.n_procs, slice_pos=comm.rank)
train_hr = nn.Variable(
(conf.train.batch_size, conf.train.rnn_n, conf.train.crop_size*4, conf.train.crop_size*4, 3))
data_hr = nn.Variable(
(conf.train.batch_size, conf.train.rnn_n, tar_size, tar_size, 3))
train_lr = nn_data_gauss_down_quad(data_hr.reshape(
(conf.train.batch_size * conf.train.rnn_n, tar_size, tar_size, 3)))
train_lr = F.reshape(
train_lr, (conf.train.batch_size, conf.train.rnn_n, conf.train.crop_size, conf.train.crop_size, 3))
# setting up monitors for logging
monitor_path = './nnmonitor' + \
str(datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
monitor = Monitor(monitor_path)
common_monitor = get_common_monitors(monitor)
# Change max_iter and learning_rate when batch size or no. of gpu devices change.
div_factor = conf.train.batch_size * comm.n_procs
max_iter = (conf.train.max_iter * 4) // div_factor
learning_rate = conf.train.learning_rate * \
(conf.train.batch_size / 4) * comm.n_procs
if comm.rank == 0:
print("maximum iterations", max_iter)
scope_name = 'frvsr/'
if conf.train.tecogan:
scope_name = 'tecogan/'
if not conf.train.checkpoint:
print('loading pretrained FRVSR model',
conf.train.pre_trained_frvsr_weights)
with nn.parameter_scope(scope_name):
nn.load_parameters(conf.train.pre_trained_frvsr_weights)
params_from_pre_trained_model = []
for key, val in nn.get_parameters().items():
params_from_pre_trained_model.append(scope_name + key)
network = get_tecogan_model(conf, train_lr, train_hr, scope_name)
params_from_graph = nn.get_parameters()
# Set the Generator parameters which are not in FRVSR to zero,
# as done in orig implementation.
for key, val in params_from_graph.items():
if key in params_from_pre_trained_model or key.startswith('vgg') or key.startswith('disc'):
continue
print(key)
val.data.zero() # fill with zero
else:
network = get_tecogan_model(conf, train_lr, train_hr, scope_name)
# Define discriminator optimizer/solver
solver_disc = S.Adam(alpha=learning_rate,
beta1=conf.train.beta, eps=conf.train.adameps)
# Set discriminator Parameters
with nn.parameter_scope("discriminator"):
solver_disc.set_parameters(nn.get_parameters())
# setting up monitors for TecoGAN
tecogan_monitor = get_tecogan_monitors(monitor)
else:
network = get_frvsr_model(conf, train_lr, train_hr, scope_name)
# Define generator and fnet optimizer/solver
solver_gen = S.Adam(alpha=learning_rate,
beta1=conf.train.beta, eps=conf.train.adameps)
solver_fnet = S.Adam(alpha=learning_rate,
beta1=conf.train.beta, eps=conf.train.adameps)
# Set generator and fnet Parameters
with nn.parameter_scope(scope_name + "generator"):
solver_gen.set_parameters(nn.get_parameters())
with nn.parameter_scope(scope_name + "fnet"):
solver_fnet.set_parameters(nn.get_parameters())
if conf.train.tecogan:
solver_dict = {"gen": solver_gen,
"fnet": solver_fnet, "disc": solver_disc}
else:
solver_dict = {"gen": solver_gen, "fnet": solver_fnet}
start_point = 0
if conf.train.checkpoint:
# Load optimizer/solver information and model weights from checkpoint
start_point = load_checkpoint(conf.train.checkpoint, solver_dict)
# Exponential Moving Average Calculation for tb
ema = ExponentialMovingAverage(conf.train.decay)
tb = 0
# Create output directory if it doesn't exist
if not os.path.exists(conf.data.output_dir):
os.makedirs(conf.data.output_dir)
# Training loop.
for i in range(start_point, max_iter):
# Get Training Data
data_hr.d, train_hr.d = data_iterator_train.next()
if conf.train.tecogan:
network.t_discrim_loss.forward(clear_no_need_grad=True)
if np.less(tb, 0.4): # train gen with d
# Compute grads for discriminator and update
solver_disc.zero_grad()
# Stop back-propagation from t_discrim_loss to generator
network.t_gen_output.need_grad = False
if comm.n_procs > 1:
all_reduce_callback = comm.get_all_reduce_callback()
network.t_discrim_loss.backward(clear_buffer=True,
communicator_callbacks=all_reduce_callback)
else:
network.t_discrim_loss.backward(clear_buffer=True)
solver_disc.update() # Update grads
# Enable back propagation from fnet_loss to Generator
network.t_gen_output.need_grad = True
# Compute grads for fnet and generator together using fnet_loss
solver_fnet.zero_grad()
solver_gen.zero_grad()
# Apply forward and backward propagation on fnet_loss
network.fnet_loss.forward(clear_no_need_grad=True)
if comm.n_procs > 1:
all_reduce_callback = comm.get_all_reduce_callback()
network.fnet_loss.backward(clear_buffer=True,
communicator_callbacks=all_reduce_callback)
else:
network.fnet_loss.backward(clear_buffer=True)
# Update grads for fnet and generator
solver_gen.update()
solver_fnet.update()
if conf.train.tecogan:
if comm.n_procs > 1:
comm.all_reduce([network.t_discrim_real_loss.data,
network.t_adversarial_loss.data], division=True, inplace=True)
t_balance = F.mean(network.t_discrim_real_loss.data) + \
network.t_adversarial_loss.data
if i == 0:
ema.register(t_balance)
else:
tb = ema(t_balance)
if comm.rank == 0:
tecogan_monitor.monitor_pp_loss.add(
i, network.pp_loss.d.copy())
tecogan_monitor.monitor_vgg_loss.add(
i, network.vgg_loss.d.copy())
tecogan_monitor.monitor_sum_layer_loss.add(
i, network.sum_layer_loss.d.copy())
tecogan_monitor.monitor_adv_loss.add(
i, network.t_adversarial_loss.d.copy())
tecogan_monitor.monitor_disc_loss.add(
i, network.t_discrim_loss.d.copy())
tecogan_monitor.monitor_tb.add(i, tb)
if comm.rank == 0:
common_monitor.monitor_content_loss.add(
i, network.content_loss.d.copy())
common_monitor.monitor_gen_loss.add(i, network.gen_loss.d.copy())
common_monitor.monitor_warp_loss.add(i, network.warp_loss.d.copy())
common_monitor.monitor_lr.add(i, learning_rate)
common_monitor.monitor_time.add(i)
if (i % conf.train.save_freq) == 0:
# Save intermediate model parameters
with nn.parameter_scope(scope_name):
nn.save_parameters(os.path.join(
conf.data.output_dir, "model_param_%08d.h5" % i))
# Save intermediate check_points
save_checkpoint(conf.data.output_dir, i, solver_dict)
# save final Generator and Fnet network parameters
if comm.rank == 0:
with nn.parameter_scope(scope_name):
nn.save_parameters(os.path.join(
conf.data.output_dir, "model_param_%08d.h5" % i))
if __name__ == "__main__":
main()
| 42.251064 | 107 | 0.647699 |
4a2298526854320f99ed39cda615f37d6526a714 | 4,051 | py | Python | modin/pandas/test/test_api.py | calzoom/modin | 17b7fccb28cf525bf1abd1a7be979c4cb5b66688 | [
"Apache-2.0"
] | 1 | 2019-03-12T08:24:24.000Z | 2019-03-12T08:24:24.000Z | modin/pandas/test/test_api.py | calzoom/modin | 17b7fccb28cf525bf1abd1a7be979c4cb5b66688 | [
"Apache-2.0"
] | null | null | null | modin/pandas/test/test_api.py | calzoom/modin | 17b7fccb28cf525bf1abd1a7be979c4cb5b66688 | [
"Apache-2.0"
] | null | null | null | import modin.pandas as pd
import pandas
import inspect
import numpy as np
def test_top_level_api_equality():
modin_dir = [obj for obj in dir(pd) if obj[0] != "_"]
pandas_dir = [obj for obj in dir(pandas) if obj[0] != "_"]
missing_from_modin = set(pandas_dir) - set(modin_dir)
ignore = [
"np",
"testing",
"pandas",
"core",
"compat",
"util",
"offsets",
"datetime",
"arrays",
"api",
"tseries",
"errors",
"to_msgpack", # This one is experimental, and doesn't look finished
"describe_option",
"get_option",
"option_context",
"reset_option",
"options",
]
assert not len(
missing_from_modin - set(ignore)
), "Differences found in API: {}".format(missing_from_modin - set(ignore))
difference = []
allowed_different = ["Interval"]
for m in set(pandas_dir) - set(ignore):
if m in allowed_different:
continue
try:
pandas_sig = dict(inspect.signature(getattr(pandas, m)).parameters)
except TypeError:
continue
try:
modin_sig = dict(inspect.signature(getattr(pd, m)).parameters)
except TypeError:
continue
if not pandas_sig == modin_sig:
try:
append_val = (
m,
{
i: pandas_sig[i]
for i in pandas_sig.keys()
if pandas_sig[i] != modin_sig[i]
and not (
pandas_sig[i].default is np.nan
and modin_sig[i].default is np.nan
)
},
)
except Exception:
print(pandas_sig)
print(modin_sig)
print(m)
raise
try:
# This validates that there are actually values to add to the difference
# based on the condition above.
if len(list(append_val[-1])[-1]) > 0:
difference.append(append_val)
except IndexError:
pass
assert not len(difference), "Differences found in API: {}".format(difference)
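# Note on the np.nan special-casing in both tests: inspect.Parameter compares
# default values with ==, and np.nan != np.nan, so two otherwise identical
# parameters that both default to np.nan would be reported as a difference.
# The `pandas_sig[i].default is np.nan and modin_sig[i].default is np.nan`
# clause filters out exactly that false positive.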
def test_dataframe_api_equality():
modin_dir = [obj for obj in dir(pd.DataFrame) if obj[0] != "_"]
pandas_dir = [obj for obj in dir(pandas.DataFrame) if obj[0] != "_"]
ignore = ["timetuple"]
missing_from_modin = set(pandas_dir) - set(modin_dir)
assert not len(missing_from_modin - set(ignore))
assert not len(set(modin_dir) - set(pandas_dir))
# These have to be checked manually
allowed_different = ["to_hdf", "hist"]
difference = []
for m in modin_dir:
if m in allowed_different:
continue
try:
pandas_sig = dict(
inspect.signature(getattr(pandas.DataFrame, m)).parameters
)
except TypeError:
continue
try:
modin_sig = dict(inspect.signature(getattr(pd.DataFrame, m)).parameters)
except TypeError:
continue
if not pandas_sig == modin_sig:
append_val = (
m,
{
i: pandas_sig[i]
for i in pandas_sig.keys()
if pandas_sig[i] != modin_sig[i]
and not (
pandas_sig[i].default is np.nan
and modin_sig[i].default is np.nan
)
},
)
try:
# This validates that there are actually values to add to the difference
# based on the condition above.
if len(list(append_val[-1])[-1]) > 0:
difference.append(append_val)
except IndexError:
pass
assert not len(difference), "Differences found in API: {}".format(difference)
| 30.923664 | 88 | 0.503579 |
4a229868ce85fe66b251fa265829dfc25116a9ed | 178 | py | Python | tests/sessions_tests/no_clear_expired.py | jpmallarino/django | 659d2421c7adbbcd205604002d521d82d6b0b465 | [
"BSD-3-Clause",
"0BSD"
] | 16 | 2019-08-10T12:24:06.000Z | 2020-05-21T09:11:14.000Z | tests/sessions_tests/no_clear_expired.py | jpmallarino/django | 659d2421c7adbbcd205604002d521d82d6b0b465 | [
"BSD-3-Clause",
"0BSD"
] | 12 | 2019-08-10T11:55:29.000Z | 2020-05-21T04:46:30.000Z | tests/sessions_tests/no_clear_expired.py | jpmallarino/django | 659d2421c7adbbcd205604002d521d82d6b0b465 | [
"BSD-3-Clause",
"0BSD"
] | 3 | 2019-08-20T13:29:34.000Z | 2020-01-30T22:05:10.000Z | from django.contrib.sessions.backends.base import SessionBase
class SessionStore(SessionBase):
"""Session store without support for clearing expired sessions."""
pass
| 22.25 | 70 | 0.775281 |
4a229897c338a01c8027a906ad4a72eec5b25577 | 250,164 | py | Python | tst/style/cpplint.py | charge72002/Athena_Radiation-master | a192aeab0c3cd258853fe43468a4a1d8c2a9279d | [
"BSD-3-Clause"
] | null | null | null | tst/style/cpplint.py | charge72002/Athena_Radiation-master | a192aeab0c3cd258853fe43468a4a1d8c2a9279d | [
"BSD-3-Clause"
] | null | null | null | tst/style/cpplint.py | charge72002/Athena_Radiation-master | a192aeab0c3cd258853fe43468a4a1d8c2a9279d | [
"BSD-3-Clause"
] | 1 | 2021-07-15T19:06:10.000Z | 2021-07-15T19:06:10.000Z | #!/usr/bin/env python2
# --------------------------------------------------------------------
# KGF: https://github.com/cpplint/cpplint forked version 5a38c3a
# (modified to use Python 2 when run from command line)
# KGF: originally used google/styleguide version 1b206ee
# --------------------------------------------------------------------
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import glob
import itertools
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
import xml.etree.ElementTree
# if empty, use defaults
_header_extensions = set([])
# if empty, use defaults
_valid_extensions = set([])
# Files with any of these extensions are considered to be
# header files (and will undergo different style checks).
# This set can be extended by using the --headers
# option (also supported in CPPLINT.cfg)
def GetHeaderExtensions():
if not _header_extensions:
return set(['h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'])
return _header_extensions
# The allowed extensions for file names
# This is set by --extensions flag
def GetAllExtensions():
if not _valid_extensions:
return GetHeaderExtensions().union(set(['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
return _valid_extensions
def GetNonHeaderExtensions():
return GetAllExtensions().difference(GetHeaderExtensions())
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit]
[--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--repository=path]
[--root=subdir] [--linelength=digits] [--recursive]
[--exclude=path]
[--headers=ext1,ext2]
[--extensions=hpp,cpp,...]
<file> [file] ...
The style guidelines this tries to follow are those in
https://google.github.io/styleguide/cppguide.html
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are %s.
Other file types will be ignored.
Change the extensions with the --extensions flag.
Flags:
output=emacs|eclipse|vs7|junit
By default, the output is formatted to ease emacs parsing. Output
compatible with eclipse (eclipse), Visual Studio (vs7), and JUnit
XML parsers such as those used in Jenkins and Bamboo may also be
used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
Errors with lower verbosity levels have lower confidence and are more
likely to be false positives.
quiet
      Suppress output other than linting errors, such as information about
which files have been processed and excluded.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
repository=path
The top level directory of the repository, used to derive the header
guard CPP variable. By default, this is determined by searching for a
path that contains .git, .hg, or .svn. When this flag is specified, the
given path is used instead. This option allows the header guard CPP
variable to remain consistent even if members of a team have different
repository root directories (such as when checking out a subdirectory
with SVN). In addition, users of non-mainstream version control systems
can use this flag to ensure readable header guard CPP variables.
Examples:
Assuming that Alice checks out ProjectName and Bob checks out
ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
with no --repository flag, the header guard CPP variable will be:
Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
If Alice uses the --repository=trunk flag and Bob omits the flag or
uses --repository=. then the header guard CPP variable will be:
Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
root=subdir
The root directory used for deriving header guard CPP variables. This
directory is relative to the top level directory of the repository which
by default is determined by searching for a directory that contains .git,
.hg, or .svn but can also be controlled with the --repository flag. If
the specified directory does not exist, this flag is ignored.
Examples:
Assuming that src is the top level directory of the repository, the
header guard CPP variables for src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
recursive
Search for files to lint recursively. Each directory given in the list
of files to be linted is replaced by all files that descend from that
directory. Files with extensions not in the valid extensions list are
excluded.
exclude=path
Exclude the given path from the list of files to be linted. Relative
paths are evaluated relative to the current directory and shell globbing
is performed. This flag can be provided multiple times to exclude
multiple files.
Examples:
--exclude=one.cc
--exclude=src/*.cc
--exclude=src/*.cc --exclude=test/*.cc
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=%s
headers=extension,extension,...
The allowed header extensions that cpplint will consider to be header files
(by default, only files with extensions %s
will be assumed to be headers)
Examples:
--headers=%s
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
root=subdir
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through the linter.
"linelength" specifies the allowed line length for the project.
The "root" option is similar in function to the --root flag (see example
above).
CPPLINT.cfg has an effect on files in the same directory and all
subdirectories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all subdirectories.
""" % (list(GetAllExtensions()),
','.join(list(GetAllExtensions())),
GetHeaderExtensions(),
','.join(GetHeaderExtensions()))
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/c++14',
'build/c++tr1',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_subdir',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces_literals',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_if_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
'readability/function',
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# The default list of categories suppressed for C (not C++) files.
_DEFAULT_C_SUPPRESSED_CATEGORIES = [
'readability/casting',
]
# The default list of categories suppressed for Linux Kernel files.
_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
'whitespace/tab',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'scoped_allocator',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++14 headers
'shared_mutex',
# 17.6.1.2 C++17 headers
'any',
'charconv',
'codecvt',
'execution',
'filesystem',
'memory_resource',
'optional',
'string_view',
'variant',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Type names
_TYPES = re.compile(
r'^(?:'
# [dcl.type.simple]
r'(char(16_t|32_t)?)|wchar_t|'
r'bool|short|int|long|signed|unsigned|float|double|'
# [support.types]
r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
# [cstdint.syn]
r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
r'(u?int(max|ptr)_t)|'
r')$')
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern for matching FileInfo.BaseName() against test file name
_test_suffixes = ['_test', '_regtest', '_unittest']
_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
# Assertion macros. These are defined in base/logging.h and
# testing/base/public/gunit.h.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE', 'ASSERT_TRUE',
'EXPECT_FALSE', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
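# For illustration, the tables built above map the comparison operator inside a
# macro argument to the corresponding typed macro, e.g.:
#   _CHECK_REPLACEMENT['CHECK']['=='] -> 'CHECK_EQ'
#   _CHECK_REPLACEMENT['DCHECK']['<'] -> 'DCHECK_LT'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['<'] -> 'EXPECT_GE'  (sense inverted)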
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
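# For example, in a line such as "if (a and not b) {" both 'and' and 'not'
# match this pattern, and _ALT_TOKEN_REPLACEMENT suggests '&&' and '!' as the
# respective replacements.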
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
# Match strings that indicate we're working on a C (not C++) file.
_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
# Match string that indicates we're working on a Linux Kernel file.
_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The top level repository directory. If set, _root is calculated relative to
# this directory instead of the directory containing version control artifacts.
# This is set by the --repository flag.
_repository = None
# Files to exclude from linting. This is set by the --exclude flag.
_excludes = None
# Whether to suppress PrintInfo messages
_quiet = False
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
try:
xrange(1, 0)
except NameError:
# -- pylint: disable=redefined-builtin
xrange = range
try:
unicode
except NameError:
# -- pylint: disable=redefined-builtin
basestring = unicode = str
try:
long(2)
except NameError:
# -- pylint: disable=redefined-builtin
long = int
if sys.version_info < (3,):
# -- pylint: disable=no-member
# BINARY_TYPE = str
itervalues = dict.itervalues
iteritems = dict.iteritems
else:
# BINARY_TYPE = bytes
itervalues = dict.values
iteritems = dict.items
def unicode_escape_decode(x):
if sys.version_info < (3,):
return codecs.unicode_escape_decode(x)[0]
else:
return x
# {str, bool}: a map from error categories to booleans which indicate if the
# category should be suppressed for every line.
_global_error_suppressions = {}
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
elif category not in _LEGACY_ERROR_CATEGORIES:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
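# Illustrative examples of the suppression comments recognized above:
#   foo(a ,b);  // NOLINT(whitespace/comma)     suppress that category, this line
#   // NOLINTNEXTLINE(whitespace/line_length)   suppress it on the next line only
#   bar();      // NOLINT                       suppress every category, this line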
def ProcessGlobalSuppresions(lines):
"""Updates the list of global error suppressions.
Parses any lint directives in the file that have global effect.
Args:
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
"""
for line in lines:
if _SEARCH_C_FILE.search(line):
for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
if _SEARCH_KERNEL_FILE.search(line):
for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
_error_suppressions.clear()
_global_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment or
global suppression.
"""
return (_global_error_suppressions.get(category, False) or
linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
def _IsSourceExtension(s):
"""File extension (excluding dot) matches a source file extension."""
return s in GetNonHeaderExtensions()
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
include_list contains list of lists of (header, line number) pairs.
It's a lists of lists rather than just one flat list to make it
easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
self._section = None
self._last_header = None
self.ResetSection('')
def FindHeader(self, header):
"""Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
for section_list in self.include_list:
for f in section_list:
if f[0] == header:
return f[1]
return -1
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
# Update list of includes. Note that we never pop from the
# include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
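  # For example, CanonicalizeAlphabeticalOrder('Foo-Bar-inl.h') returns
  # 'foo_bar.h', so "foo-bar-inl.h" and "foo_bar.h" sort identically.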
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "eclipse" - format that eclipse can parse
# "vs7" - format that Microsoft Visual Studio 7 can parse
# "junit" - format that Jenkins, Bamboo, etc can parse
self.output_format = 'emacs'
# For JUnit output, save errors and failures until the end so that they
# can be written into the XML
self._junit_errors = []
self._junit_failures = []
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in sorted(iteritems(self.errors_by_category)):
self.PrintInfo('Category \'%s\' errors found: %d\n' %
(category, count))
if self.error_count > 0:
self.PrintInfo('Total errors found: %d\n' % self.error_count)
def PrintInfo(self, message):
if not _quiet and self.output_format != 'junit':
sys.stderr.write(message)
def PrintError(self, message):
if self.output_format == 'junit':
self._junit_errors.append(message)
else:
sys.stderr.write(message)
def AddJUnitFailure(self, filename, linenum, message, category, confidence):
self._junit_failures.append((filename, linenum, message, category,
confidence))
def FormatJUnitXML(self):
num_errors = len(self._junit_errors)
num_failures = len(self._junit_failures)
testsuite = xml.etree.ElementTree.Element('testsuite')
testsuite.attrib['name'] = 'cpplint'
testsuite.attrib['errors'] = str(num_errors)
testsuite.attrib['failures'] = str(num_failures)
if num_errors == 0 and num_failures == 0:
testsuite.attrib['tests'] = str(1)
xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
else:
testsuite.attrib['tests'] = str(num_errors + num_failures)
if num_errors > 0:
testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
testcase.attrib['name'] = 'errors'
error = xml.etree.ElementTree.SubElement(testcase, 'error')
error.text = '\n'.join(self._junit_errors)
if num_failures > 0:
# Group failures by file
failed_file_order = []
failures_by_file = {}
for failure in self._junit_failures:
failed_file = failure[0]
if failed_file not in failed_file_order:
failed_file_order.append(failed_file)
failures_by_file[failed_file] = []
failures_by_file[failed_file].append(failure)
# Create a testcase for each file
for failed_file in failed_file_order:
failures = failures_by_file[failed_file]
testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
testcase.attrib['name'] = failed_file
failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
template = '{0}: {1} [{2}] [{3}]'
texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
failure.text = '\n'.join(texts)
xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
def _AddFilters(filters):
"""Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.AddFilters(filters)
def _BackupFilters():
""" Saves the current filter list to backup storage."""
_cpplint_state.BackupFilters()
def _RestoreFilters():
""" Restores filters previously backed up."""
_cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
  _TEST_TRIGGER = 400    # about 60% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if not self.in_a_function:
return
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
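  # For example, at --v=0 a non-test function longer than 250 non-comment lines
  # triggers the warning (400 lines for TEST/test functions); each additional
  # verbosity level doubles the trigger.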
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
r"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
# If the user specified a repository path, it exists, and the file is
# contained in it, use the specified repository path
if _repository:
repo = FileInfo(_repository).FullName()
root_dir = project_dir
while os.path.exists(root_dir):
# allow case insensitive compare on Windows
if os.path.normcase(root_dir) == os.path.normcase(repo):
return os.path.relpath(fullname, root_dir).replace('\\', '/')
one_up_dir = os.path.dirname(root_dir)
if one_up_dir == root_dir:
break
root_dir = one_up_dir
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = current_dir = os.path.dirname(fullname)
while current_dir != os.path.dirname(current_dir):
if (os.path.exists(os.path.join(current_dir, ".git")) or
os.path.exists(os.path.join(current_dir, ".hg")) or
os.path.exists(os.path.join(current_dir, ".svn"))):
root_dir = current_dir
current_dir = os.path.dirname(current_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period, includes that period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return _IsSourceExtension(self.Extension()[1:])
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
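# For example, with --filter=-whitespace,+whitespace/braces the category
# 'whitespace/braces' is still printed (the later '+' filter wins), while
# 'whitespace/indent' stays filtered out by the leading '-whitespace'.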
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
_cpplint_state.PrintError('%s(%s): warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'junit':
_cpplint_state.AddJUnitFailure(filename, linenum, message, category,
confidence)
else:
final_message = '%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence)
sys.stderr.write(final_message)
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of removing spaces carefully so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
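# Illustrative behavior (arguments shown as the raw C++ line content):
#   x = "abc    -> True   (unterminated string)
#   x = "abc"   -> False
#   x = "a\"b   -> True   (the escaped quote does not terminate the string)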
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
#
# Once we have matched a raw string, we check the prefix of the
# line to make sure that the line is not part of a single line
# comment. It's done this way because we remove raw strings
# before removing comments as opposed to removing comments
# before removing raw strings. This is because there are some
# cpplint checks that requires the comments to be preserved, but
# we don't want to check comments that are inside raw strings.
matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if (matched and
not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
matched.group(1))):
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
  # Having /**/ dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '/**/'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
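# For example, 'int x = 0; // init' becomes 'int x = 0;' and
# 'f(/* inline */ x)' becomes 'f(x)'.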
class CleansedLines(object):
"""Holds 4 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments.
2) lines member contains lines without comments.
3) raw_lines member contains all the lines without processing.
4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
strings removed.
All these members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
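# Illustrative behavior of CleansedLines._CollapseStrings (a sketch derived
# from the implementation above; whitespace in the results is approximate):
#   _CollapseStrings('printf("%d", \'a\');')   ->  printf("", '');
#   _CollapseStrings("int x = 1'000'000;")     ->  int x = 1000000;
# The second case shows C++14 digit separators being collapsed into a plain
# numeric literal rather than being mistaken for character constants.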
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
      # Found something that looks like the end of a statement.  If we are
      # currently expecting a '>', the matching '<' must have been an operator,
      # since a template argument list should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
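# Illustrative use of CloseExpression (a sketch; indices assume the elided
# line is exactly the text shown):
#   For the line "(a + b) * c" with pos=0 (the opening paren),
#   CloseExpression returns (line, linenum, 7): position 7 is just past the
#   ')' that matches the '(' at position 0.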
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differ by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
      # Found something that looks like the end of a statement.  If we are
      # currently expecting a '<', the matching '>' must have been an operator,
      # since a template argument list should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
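# Illustrative use of ReverseCloseExpression (the mirror image of the
# CloseExpression sketch above):
#   For the line "(a + b) * c" with pos=6 (the closing paren),
#   ReverseCloseExpression returns (line, linenum, 0): the index *at* the
#   matching '(' rather than one past it.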
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in range(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
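# Illustrative behavior of GetIndentLevel (follows directly from the regexp
# above, which only counts leading spaces, not tabs):
#   GetIndentLevel('    int x;')  -> 4
#   GetIndentLevel('\tint x;')    -> 0   (tabs are not counted)
#   GetIndentLevel('')            -> 0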
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
  # Restore the original filename in case cpplint is invoked from Emacs's
  # flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
# Replace 'c++' with 'cpp'.
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
# Convert root path to unix format because file_path_from_root is also
# in that format and they wouldn't match otherwise on Windows machines
root = os.path.normpath(_root).replace('\\', '/')
file_path_from_root = re.sub('^' + root + '/', '', file_path_from_root)
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
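# Illustrative result of GetHeaderGuardCPPVariable (the exact prefix depends
# on FileInfo.RepositoryName() and the --root flag, both handled elsewhere in
# this file, so treat this as an approximate sketch):
#   GetHeaderGuardCPPVariable('chrome/browser/ui/browser.h')
#       -> 'CHROME_BROWSER_UI_BROWSER_H_'   (assuming the repository root is
#          the directory containing 'chrome/')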
def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
# Allow pragma once instead of header guards
for i in raw_lines:
if Search(r'^\s*#pragma\s+once', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments; look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar)
def CheckHeaderFileIncluded(filename, include_state, error):
"""Logs an error if a source file does not include its header."""
# Do not check test files
fileinfo = FileInfo(filename)
if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
for ext in GetHeaderExtensions():
basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
headerfile = basefilename + '.' + ext
if not os.path.exists(headerfile):
continue
headername = FileInfo(headerfile).RepositoryName()
first_include = None
for section_list in include_state.include_list:
for f in section_list:
if headername in f[0] or f[0] in headername:
return
if not first_include:
first_include = f[1]
error(filename, first_include, 'build/include', 5,
'%s should include its header file %s' % (fileinfo.RepositoryName(),
headername))
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if unicode_escape_decode('\ufffd') in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
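# Illustrative matches for the patterns above (a sketch; the prefix requires
# the return value to be consumed by an operator or an open paren):
#   "int r = rand();"        -> matches the rand() pattern; CheckPosixThreading
#                               below will suggest rand_r(...) instead.
#   "ACMRandom rand(seed);"  -> no match (no operator before "rand(", and the
#                               call has arguments), so no warning.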
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
  Much code was originally written without multi-threading in mind.  Engineers
  also tend to rely on old habits: many learned POSIX before its threading
  extensions were added.  These checks guide engineers toward the thread-safe
  variants (when using POSIX directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
  For example, the following function:
void increment_counter(int* count) {
*count++;
}
  is invalid because it effectively does count++ (moving the pointer); it
  should be replaced with ++*count, (*count)++, or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, linenum, seen_open_brace):
self.starting_linenum = linenum
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self, linenum):
_BlockInfo.__init__(self, linenum, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# If there is a DISALLOW macro, it should appear near the end of
# the class.
seen_last_thing_in_class = False
for i in xrange(linenum - 1, self.starting_linenum, -1):
match = Search(
r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
self.name + r'\)',
clean_lines.elided[i])
if match:
if seen_last_thing_in_class:
error(filename, i, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
break
if not Match(r'^\s*$', clean_lines.elided[i]):
seen_last_thing_in_class = True
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name or ''
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace.  Don't issue a
    # warning for a missing namespace comment if there aren't enough lines.
    # However, do apply checks if there is already an end-of-namespace
    # comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
    # preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
    # Besides these, we don't accept anything else; otherwise we might
    # get false negatives when an existing comment is a substring of the
    # expected namespace.
if self.name:
# Named namespace
if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
re.escape(self.name) + r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
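# Illustrative outcomes of _NamespaceInfo.CheckEnd for "namespace foo" (a
# sketch based on the regexps above):
#   }  // namespace foo          -> accepted
#   }  /* namespace foo */       -> accepted
#   }  // end of namespace foo.  -> accepted
#   }  // foo                    -> 'Namespace should be terminated with
#                                   "// namespace foo"'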
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what the current scope
    # is at the beginning of the line.  This check is simplified by
    # saving the previous top of the nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
      # Match start of namespace.  The "\b\s*" below catches namespace
      # declarations even if they aren't followed by whitespace; this is
      # so that we don't confuse our namespace checker.  The missing
      # spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If a namespace or class hasn't seen an opening brace yet, mark the
        # namespace/class head as complete.  Push a new block onto the
        # stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo(linenum))
else:
self.stack.append(_BlockInfo(linenum, True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
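# Minimal sketch of how a NestingState instance is typically driven (the real
# call sites live in the per-line processing loop elsewhere in this file):
#   nesting_state = NestingState()
#   for linenum in range(clean_lines.NumLines()):
#     nesting_state.Update(filename, clean_lines, linenum, error)
#     # ... per-line checks that consult nesting_state ...
#   nesting_state.CheckCompletedBlocks(filename, error)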
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
variadic_args = [arg for arg in constructor_args if '&&...' in arg]
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1) or
# variadic arguments with zero or one argument
(len(constructor_args) <= 2 and
len(variadic_args) >= 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args or variadic_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
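# Illustrative outcomes of the explicit-constructor heuristic above, for
# declarations inside the body of "class Foo" (a sketch; the regexp requires
# the declaration line to start with whitespace, as class members do):
#   Foo(int x);                        -> flagged: should be marked explicit
#   explicit Foo(int x);               -> accepted
#   Foo(const Foo& other);             -> accepted (copy constructor)
#   Foo(std::initializer_list<int> v); -> accepted (initializer_list)
#   explicit Foo();                    -> flagged: zero-parameter constructors
#                                         should not be marked explicit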
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
  # immediately inside parens (e.g. "f( 3, 4 )").  We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
not Search(r'\bcase\s+\(', fncall)):
# TODO(unknown): Space after an operator function seem to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
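# Illustrative diagnostics from CheckSpacingForFunctionCall (a sketch based on
# the regexps above):
#   DoStuff( 3, 4);   -> "Extra space after ( in function call"
#   DoStuff (3, 4);   -> "Extra space before ( in function call"
#   DoStuff(3, 4 );   -> "Extra space before )"
#   if (x == 3) {     -> not flagged here; only the text inside the if(...)
#                        parentheses is inspected for call spacing.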
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
  only whitespace.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in range(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
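# Illustrative TODO comments and how CheckComment below treats them (a sketch):
#   // TODO(alice): Refactor this.   -> well-formed
#   // TODO: Refactor this.          -> "Missing username in TODO..."
#   // TODO(alice)Refactor this.     -> "TODO(my_username) should be followed
#                                        by a space"
#   //TODO(alice): Refactor this.    -> flagged by the generic "space between
#                                        // and comment" check instead.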
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the // unless
# it's a /// or //! Doxygen comment.
if (Match(r'//[^ ]*\w', comment) and
not Match(r'(///|//\!)(\s+|$)', comment)):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80 column line.
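      #
      # Illustrative (hypothetical) layout that this exception is meant to
      # excuse rather than flag as a redundant blank line:
      #   void SomeFunctionWithALongName(
      #       int argument_one, int argument_two) {
      #
      #     DoSomething();
      #   }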
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
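  #
  # Illustrative (hypothetical) inputs: "x=3;" is flagged here, while
  # "if ((a=Foo()) == 0)", "a >= b", and "operator=" lines are not.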
if ((Search(r'[\w.]=', line) or
Search(r'=[\w.]', line))
and not Search(r'\b(if|while|for) ', line)
# Operators taken from [lex.operators] in C++11 standard.
and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
and not Search(r'operator=', line)):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
  # If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. This is because there are too
# many false positives due to RValue references.
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
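  #
  # Illustrative (hypothetical) inputs: "f(a,b);" is flagged, while
  # "f(a, b);" is not.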
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
def _IsType(clean_lines, nesting_state, expr):
"""Check if expression looks like a type name, returns true if so.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
expr: The expression to check.
Returns:
True, if token looks like a type.
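  For example (illustrative): for expr 'const uint32_t', the last token
  'uint32_t' matches the _TYPES pattern directly, so this returns True
  without walking the nesting stack.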
"""
# Keep only the last token in the expression
last_word = Match(r'^.*(\b\S+)$', expr)
if last_word:
token = last_word.group(1)
else:
token = expr
# Match native types and stdint types
if _TYPES.match(token):
return True
# Try a bit harder to match templated types. Walk up the nesting
# stack until we find something that resembles a typename
# declaration for what we are looking for.
typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
r'\b')
block_index = len(nesting_state.stack) - 1
while block_index >= 0:
if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
return False
# Found where the opening brace is. We want to scan from this
# line up to the beginning of the function, minus a few lines.
# template <typename Type1, // stop scanning here
# ...>
# class C
# : public ... { // start scanning here
last_line = nesting_state.stack[block_index].starting_linenum
next_block_start = 0
if block_index > 0:
next_block_start = nesting_state.stack[block_index - 1].starting_linenum
first_line = last_line
while first_line >= next_block_start:
if clean_lines.elided[first_line].find('template') >= 0:
break
first_line -= 1
if first_line < next_block_start:
# Didn't find any "template" keyword before reaching the next block,
# there are probably no template things to check for this block
block_index -= 1
continue
# Look for typename in the specified range
for i in xrange(first_line, last_line + 1, 1):
if Search(typename_pattern, clean_lines.elided[i]):
return True
block_index -= 1
return False
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces when they are delimiting blocks, classes, namespaces etc.
# And since you should never have braces at the beginning of a line,
# this is an easy test. Except that braces used for initialization don't
# follow the same rule; we often don't want spaces before those.
match = Match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
leading_text = match.group(1)
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
# We also suppress warnings for `uint64_t{expression}` etc., as the style
# guide recommends brace initialization for integral types to avoid
# overflow/truncation.
if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
and not _IsType(clean_lines, nesting_state, leading_text)):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block. We also allow a brace on the
# following line if it is part of an array initialization and would not fit
# within the 80 character limit of the preceding line.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline) and
not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
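  #
  # Illustrative (hypothetical) input that gets flagged:
  #   if (cond) DoThis(); DoThat();  // two statements, no braces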
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
  # These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs
# - decltype
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\bdecltype$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
# We need to check the line forward for NOLINT
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
error)
ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
error)
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
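  #
  # Illustrative (hypothetical) inputs: "while (KeepWaiting());" and
  # "if (ready);" are both flagged here.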
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
# Check for if statements that have completely empty bodies (no comments)
# and no else clauses.
if end_pos >= 0 and matched.group(1) == 'if':
# Find the position of the opening { for the if statement.
# Return without logging an error if it has no brackets.
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
# Couldn't find conditional's opening { or any code before EOF.
return
opening_line_fragment = clean_lines.elided[opening_linenum]
# Set opening_line (opening_line_fragment may not be entire opening line).
opening_line = clean_lines.elided[opening_linenum]
# Find the position of the closing }.
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
# We need to make opening_pos relative to the start of the entire line.
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
# Now construct the body of the conditional. This consists of the portion
# of the opening line after the {, all lines until the closing line,
# and the portion of the closing line before the }.
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
# Opening line ends with a comment, so conditional isn't empty.
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
bodylist = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(bodylist)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
# Check if the body is empty
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
# The body is empty. Now make sure there's not an else clause.
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
# The body is empty and there's no else clause until EOF or other code.
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause'))
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
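  For example (illustrative, assuming 'CHECK' is listed in _CHECK_MACROS):
    FindCheckMacro('  CHECK(x == 42);') returns ('CHECK', 7),
  i.e. the macro name and the index of its opening parenthesis.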
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
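  For example (illustrative): "CHECK(count == 42);" draws a suggestion to use
  CHECK_EQ instead, assuming '==' maps to CHECK_EQ in _CHECK_REPLACEMENT.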
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs do not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
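  For example (illustrative): a line such as "if (a and not b)" is reported
  with suggestions to use "&&" instead of "and" and "!" instead of "not".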
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
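  For example (illustrative): GetLineWidth('int x;') is 6, while a unicode
  string holding a single fullwidth CJK character has width 2.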
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
prev = raw_lines[linenum - 1] if linenum > 0 else ''
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
# There are certain situations we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
# We also don't check for lines that look like continuation lines
# (of lines ending in double quotes, commas, equals, or angle brackets)
# because the rules for how to indent those are non-trivial.
if (not Search(r'[",=><] *$', prev) and
(initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# Check if the line is a header guard.
is_header_guard = False
if file_extension in GetHeaderExtensions():
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
#
# Doxygen documentation copying can get pretty long when using an overloaded
# function declaration
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^\s*//\s*[^\s]*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
line_width = GetLineWidth(line)
if line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# allow simple single line lambdas
not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
line) and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in itertools.chain(
('%s.%s' % (test_suffix.lstrip('_'), ext)
for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
('%s.%s' % (suffix, ext)
for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
# Headers with C++ extensions shouldn't be considered C system headers
if is_system and os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']:
is_system = False
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
  # If the target file and the include we're checking share a
  # basename when we drop common extensions, and the include
  # lives in the same directory, then it's likely to be owned
  # by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
target_dir_pub = os.path.normpath(target_dir + '/../public')
target_dir_pub = target_dir_pub.replace('\\', '/')
if target_base == include_base and (
include_dir == target_dir or
include_dir == target_dir_pub):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special include conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include_subdir', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
return
for extension in GetNonHeaderExtensions():
if (include.endswith('.' + extension) and
os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
error(filename, linenum, 'build/include', 4,
'Do not include .' + extension + ' files from other packages')
return
if not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
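    #
    # Illustrative (hypothetical) ordering for a file foo/foo.cc:
    #   #include "foo/foo.h"
    #   #include <stdio.h>
    #   #include <string>
    #   #include "bar/other.h"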
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This handles properly
  nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string ending with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and currently use inferior regexp matching).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
# Stream types.
_RE_PATTERN_REF_STREAM_PARAM = (
r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
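# Illustrative (hypothetical) matches for the patterns above: in a declaration
# like "void Update(const string& name, string* out)", the parameter
# "const string& name" is covered by _RE_PATTERN_CONST_REF_PARAM, whereas a
# non-const reference parameter such as "string& name" is what
# _RE_PATTERN_REF_PARAM captures (and what the reference-argument checks
# look for).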
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension in GetHeaderExtensions():
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
if Search(r'\bliterals\b', line):
error(filename, linenum, 'build/namespaces_literals', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
else:
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension in GetHeaderExtensions()
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
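# Rough examples of lines CheckLanguage reports (assuming default filters):
#   'long long counter;'   -> runtime/int (use int64 etc.)
#   'int operator&();'     -> runtime/operator (unary operator&)
#   'int buf[len];'        -> runtime/arrays when 'len' is not a compile-time constant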
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access, and
# also because globals can be destroyed when some threads are still running.
# TODO(unknown): Generalize this to also find static unique_ptr instances.
# TODO(unknown): File bugs for clang-tidy to find these.
match = Match(
r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
r'([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
if (match and
not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
if Search(r'\bconst\b', line):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string '
'instead: "%schar%s %s[]".' %
(match.group(1), match.group(2) or '', match.group(3)))
else:
error(filename, linenum, 'runtime/string', 4,
'Static/global string variables are not permitted.')
if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
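# For example, 'static string g_name;' is flagged under runtime/string, and the
# const variant is steered toward a C style string such as 'static const char
# g_name[] = "...";'.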
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
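# Roughly: 'sprintf(buf, "%d", x);' draws a runtime/printf error, 'snprintf(buf, 10,'
# is told to prefer sizeof(buf) over the literal 10 as the second argument, and
# strcpy/strcat are pointed at snprintf.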
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
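# Illustration: for a declaration like 'void Draw() override {', the backward scan
# finds the opening parenthesis, closes the expression, and sees 'override' after it,
# so IsDerivedFunction returns True and non-const reference warnings are not blamed
# on the derived function (see CheckForNonConstReference).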
def IsOutOfLineMethodDefinition(clean_lines, linenum):
"""Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
return False
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tends to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tends to appear in constructor initializer lists as
      # opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
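# Illustration: while scanning backwards, a line such as
#   'MyClass::MyClass(int x) : member_(x), other_(0) {'
# matches the lone-colon pattern above, so lines inside that initializer list
# return True.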
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Don't warn on out-of-line method definitions, as we would warn on the
# in-line declaration, if it isn't marked with 'override'.
if IsOutOfLineMethodDefinition(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
  # Check for non-const references in function parameters. A single '&' may be
  # found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
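# Roughly: a parameter list like 'void Update(Config& config);' is reported under
# runtime/references, while 'void Update(const Config& config);',
# 'void swap(Foo& a, Foo& b);' and 'ostream& operator<<(ostream& os, ...)' are not.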
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match:
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
      reinterpret_cast, static_cast, or const_cast, depending on the pattern.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
  # Try expanding current context to see if we are one level of
  # parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
# A single unnamed argument for a function tends to look like old style cast.
# If we see those, don't issue warnings for deprecated casts.
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
remainder):
return False
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
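# Illustration: 'int y = (int)x;' is reported with a suggestion to use
# static_cast<int>(...), whereas '(char *)"flag"' is routed to the const_cast check
# and other pointer casts to the reinterpret_cast check by CheckCasts above.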
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
'unique_ptr', 'weak_ptr')),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<tuple>', ('tuple',)),
('<unordered_map>', ('unordered_map', 'unordered_multimap')),
('<unordered_set>', ('unordered_set', 'unordered_multiset')),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_MAYBE_TEMPLATES = (
('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
'transform',
)),
('<utility>', ('forward', 'make_pair', 'move', 'swap')),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
for _template in _templates:
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_headers_maybe_templates.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
_header))
# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
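# For instance, the generated entries make a line containing 'std::vector<int> v;'
# require '<vector>' and 'std::unique_ptr<Foo>' require '<memory>', while the
# _re_pattern_headers_maybe_templates entries do the same for calls like
# 'std::min(a, b)' against '<algorithm>'.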
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the source (e.g. .cc) file
    filename_h: is the path for the header (e.g. .h) file
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
fileinfo_cc = FileInfo(filename_cc)
  if fileinfo_cc.Extension().lstrip('.') not in GetNonHeaderExtensions():
return (False, '')
fileinfo_h = FileInfo(filename_h)
  if fileinfo_h.Extension().lstrip('.') not in GetHeaderExtensions():
return (False, '')
filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
if matched_test_suffix:
filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
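# Illustration, mirroring the docstring above:
#   FilesBelongToSameModule('/absolute/path/to/base/sysinfo.cc', 'base/sysinfo.h')
#     -> (True, '/absolute/path/to/')
# and a test file such as foo_test.cc is treated as part of foo.h's module.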
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in range(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_headers_maybe_templates:
if pattern.search(line):
required[header] = (linenum, template)
    # The following check is just a speed-up; no semantics are changed.
    if '<' not in line:  # Reduces the CPU time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
matched = pattern.search(line)
if matched:
# Don't warn about IWYU in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = list(include_dict.keys())
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if not header_found:
for extension in GetNonHeaderExtensions():
if filename.endswith('.' + extension):
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in sorted(required, key=required.__getitem__):
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
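# Illustration: 'make_pair<int, int>(1, 2)' triggers build/explicit_make_pair, while
# 'make_pair(1, 2)', which lets the template arguments be deduced, does not.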
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
# there is a column on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
return len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo))
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=None):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
if extra_check_functions:
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++ TR1 headers.
if include and include.group(1).startswith('tr1/'):
error(filename, linenum, 'build/c++tr1', 5,
('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))
# Flag unapproved C++11 headers.
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'chrono',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
('std::%s is an unapproved C++11 class or function. Send c-style '
'an example of where it would make your code more readable, and '
'they may let you use it.') % top_name)
def FlagCxx14Features(filename, clean_lines, linenum, error):
"""Flag those C++14 features that we restrict.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++14 headers.
if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
error(filename, linenum, 'build/c++14', 5,
('<%s> is an unapproved C++14 header.') % include.group(1))
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=None):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
ProcessGlobalSuppresions(lines)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
if file_extension in GetHeaderExtensions():
CheckForHeaderGuard(filename, clean_lines, error)
for line in range(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# Check that the .cc file has included its header if it exists.
if _IsSourceExtension(file_extension):
CheckHeaderFileIncluded(filename, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
_cpplint_state.PrintInfo('Ignoring "%s": file excluded by '
'"%s". File path component "%s" matches pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
elif name == 'linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
_cpplint_state.PrintError('Line length must be numeric.')
elif name == 'extensions':
global _valid_extensions
try:
extensions = [ext.strip() for ext in val.split(',')]
_valid_extensions = set(extensions)
except ValueError:
sys.stderr.write('Extensions should be a comma-separated list of values;'
'for example: extensions=hpp,cpp\n'
'This could not be parsed: "%s"' % (val,))
elif name == 'headers':
global _header_extensions
try:
extensions = [ext.strip() for ext in val.split(',')]
_header_extensions = set(extensions)
except ValueError:
sys.stderr.write('Extensions should be a comma-separated list of values;'
'for example: extensions=hpp,cpp\n'
'This could not be parsed: "%s"' % (val,))
elif name == 'root':
global _root
_root = val
else:
_cpplint_state.PrintError(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
_cpplint_state.PrintError(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
for cfg_filter in reversed(cfg_filters):
_AddFilters(cfg_filter)
return True
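# A hypothetical CPPLINT.cfg illustrating the options parsed above:
#   set noparent
#   filter=-build/include,-readability/casting
#   exclude_files=.*_generated\.cc
#   linelength=100
#   root=include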
def ProcessFile(filename, vlevel, extra_check_functions=None):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
_cpplint_state.PrintError(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in GetAllExtensions():
_cpplint_state.PrintError('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(GetAllExtensions())))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
    # minority; we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
_cpplint_state.PrintInfo('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(0)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'repository=',
'linelength=',
'extensions=',
'exclude=',
'headers=',
'quiet',
'recursive'])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
recursive = False
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse', 'junit'):
PrintUsage('The only allowed output formats are emacs, vs7, eclipse '
'and junit.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--repository':
global _repository
_repository = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--exclude':
global _excludes
if not _excludes:
_excludes = set()
_excludes.update(glob.glob(val))
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
elif opt == '--headers':
global _header_extensions
try:
_header_extensions = set(val.split(','))
except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
elif opt == '--recursive':
recursive = True
elif opt == '--quiet':
global _quiet
_quiet = True
if not filenames:
PrintUsage('No files were specified.')
if recursive:
filenames = _ExpandDirectories(filenames)
if _excludes:
filenames = _FilterExcludedFiles(filenames)
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def _ExpandDirectories(filenames):
"""Searches a list of filenames and replaces directories in the list with
all files descending from those directories. Files with extensions not in
the valid extensions list are excluded.
Args:
filenames: A list of files or directories
Returns:
A list of all files that are members of filenames or descended from a
directory in filenames
"""
expanded = set()
for filename in filenames:
if not os.path.isdir(filename):
expanded.add(filename)
continue
for root, _, files in os.walk(filename):
for loopfile in files:
fullname = os.path.join(root, loopfile)
if fullname.startswith('.' + os.path.sep):
fullname = fullname[len('.' + os.path.sep):]
expanded.add(fullname)
filtered = []
for filename in expanded:
if os.path.splitext(filename)[1][1:] in GetAllExtensions():
filtered.append(filename)
return filtered
def _FilterExcludedFiles(filenames):
"""Filters out files listed in the --exclude command line switch. File paths
  in the switch are evaluated relative to the current working directory.
"""
exclude_paths = [os.path.abspath(f) for f in _excludes]
return [f for f in filenames if os.path.abspath(f) not in exclude_paths]
def main():
filenames = ParseArguments(sys.argv[1:])
backup_err = sys.stderr
try:
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReader(sys.stderr, 'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
if _cpplint_state.output_format == 'junit':
sys.stderr.write(_cpplint_state.FormatJUnitXML())
finally:
sys.stderr = backup_err
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| 38.546071 | 97 | 0.652388 |
4a22991a3c02fd56aaef95f5336f0512db10e9c3 | 3,068 | py | Python | pyrobolearn/robots/mkz.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 2 | 2021-01-21T21:08:30.000Z | 2022-03-29T16:45:49.000Z | pyrobolearn/robots/mkz.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | null | null | null | pyrobolearn/robots/mkz.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 1 | 2020-09-29T21:25:39.000Z | 2020-09-29T21:25:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide the Lincoln MKZ car robotic platform.
"""
import os
import numpy as np
from pyrobolearn.robots.wheeled_robot import AckermannWheeledRobot
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "[email protected]"
__status__ = "Development"
class MKZ(AckermannWheeledRobot):
r"""Lincoln MKZ car
Drive-by-wire interface to the Dataspeed Inc. Lincoln MKZ DBW kit.
References:
- [1] Dataspeed Inc.: https://www.dataspeedinc.com/
- [2] ROS wiki: http://wiki.ros.org/dbw_mkz
- [3] Bitbucket: https://bitbucket.org/DataspeedInc/dbw_mkz_ros
"""
def __init__(self, simulator, position=(0, 0, .4), orientation=(0, 0, 0, 1), fixed_base=False, scale=1.,
urdf=os.path.dirname(__file__) + '/urdfs/mkz/mkz.urdf'):
"""
Initialize the MKZ car.
Args:
simulator (Simulator): simulator instance.
position (np.array[float[3]]): Cartesian world position.
orientation (np.array[float[4]]): Cartesian world orientation expressed as a quaternion [x,y,z,w].
fixed_base (bool): if True, the robot base will be fixed in the world.
scale (float): scaling factor that is used to scale the robot.
urdf (str): path to the urdf. Do not change it unless you know what you are doing.
"""
# check parameters
if position is None:
position = (0., 0., 0.4)
if len(position) == 2: # assume x, y are given
position = tuple(position) + (0.4,)
if orientation is None:
orientation = (0, 0, 0, 1)
if fixed_base is None:
fixed_base = False
super(MKZ, self).__init__(simulator, urdf, position, orientation, fixed_base, scale)
self.name = 'mkz'
self.wheels = [self.get_link_ids(link) for link in ['wheel_fl', 'wheel_fr', 'wheel_rl', 'wheel_rr']
if link in self.link_names]
self.wheel_directions = np.ones(len(self.wheels))
self.steering = [self.get_link_ids(link) for link in ['steer_fl', 'steer_fr']
if link in self.link_names]
def steer(self, angle):
"""Set steering angle"""
angle = angle * np.ones(len(self.steering))
self.set_joint_positions(angle, joint_ids=self.steering)
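# Example (sketch; the angle unit passed to steer() is assumed to be radians, as
# with the other joint position calls above):
#   robot.steer(np.deg2rad(10))   # hold both steering joints at ~10 degrees
#   robot.drive_forward(2)        # keep driving the wheels forward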
# Test
if __name__ == "__main__":
from itertools import count
from pyrobolearn.simulators import Bullet
from pyrobolearn.worlds import BasicWorld
# Create simulator
sim = Bullet()
# create world
world = BasicWorld(sim)
# create robot
robot = MKZ(sim)
# print information about the robot
robot.print_info()
# Position control using sliders
# robot.add_joint_slider()
# run simulator
for _ in count():
# robot.update_joint_slider()
robot.drive_forward(2)
world.step(sleep_dt=1./240)
| 31.958333 | 110 | 0.629074 |
4a229ab9187e887b8b623079af4df58cf85806c3 | 8,614 | py | Python | ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 5 | 2017-07-20T11:15:10.000Z | 2020-04-16T15:42:55.000Z | ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 8 | 2020-06-18T17:31:19.000Z | 2022-03-02T08:32:03.000Z | ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 12 | 2017-05-17T09:48:01.000Z | 2021-08-05T19:01:25.000Z | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import os
# Local Imports
from oozie import copy_atlas_hive_hook_to_dfs_share_lib
# Resource Management Imports
from resource_management.core import shell, sudo
from resource_management import *
from resource_management.core.shell import as_user
from resource_management.core.logger import Logger
from resource_management.libraries.functions.show_logs import show_logs
from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
from resource_management.core import Logger
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def oozie_service(action='start', upgrade_type=None):
import params
if action == 'start':
cmd = format("cmd /C \"cd /d {oozie_tmp_dir} && {oozie_home}\\bin\\ooziedb.cmd create -sqlfile oozie.sql -run\"")
Execute(cmd, user=params.oozie_user, ignore_failures=True)
Service(params.oozie_server_win_service_name, action="start")
elif action == 'stop':
Service(params.oozie_server_win_service_name, action="stop")
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def oozie_service(action = 'start', upgrade_type=None):
"""
Starts or stops the Oozie service
:param action: 'start' or 'stop'
  :param upgrade_type: type of upgrade, either "rolling" or "non_rolling"; when set,
  some of the setup steps below are skipped since a variation of them was performed during the rolling upgrade
:return:
"""
import params
environment={'OOZIE_CONFIG': params.conf_dir}
if params.security_enabled:
if params.oozie_principal is None:
oozie_principal_with_host = 'missing_principal'
else:
oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host};")
else:
kinit_if_needed = ""
no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
if action == 'start':
start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
path_to_jdbc = params.target
if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
params.jdbc_driver_name == "org.postgresql.Driver" or \
params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
if not params.jdbc_driver_jar:
path_to_jdbc = format("{oozie_libext_dir}/") + \
params.default_connectors_map[params.jdbc_driver_name] if params.jdbc_driver_name in params.default_connectors_map else None
if not os.path.isfile(path_to_jdbc):
path_to_jdbc = format("{oozie_libext_dir}/") + "*"
error_message = "Error! Sorry, but we can't find jdbc driver with default name " + params.default_connectors_map[params.jdbc_driver_name] + \
" in oozie lib dir. So, db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'"
Logger.error(error_message)
db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
else:
db_connection_check_command = None
if upgrade_type is None:
if not os.path.isfile(path_to_jdbc) and params.jdbc_driver_name == "org.postgresql.Driver":
print format("ERROR: jdbc file {target} is unavailable. Please, follow next steps:\n" \
"1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
"3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
"{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
"/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
exit(1)
if db_connection_check_command:
sudo.chmod(params.check_db_connection_jar, 0755)
Execute( db_connection_check_command,
tries=5,
try_sleep=10,
user=params.oozie_user,
)
if params.sysprep_skip_oozie_schema_create:
Logger.info("Skipping creation of oozie schema as host is sys prepped")
else:
Execute( format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run"),
user = params.oozie_user, not_if = no_op_test,
ignore_failures = True
)
if params.security_enabled:
Execute(kinit_if_needed,
user = params.oozie_user,
)
if params.sysprep_skip_copy_oozie_share_lib_to_hdfs:
Logger.info("Skipping creation of oozie sharelib as host is sys prepped")
# Copy current hive-site to hdfs:/user/oozie/share/lib/spark/
params.HdfsResource(format("{hdfs_share_dir}/lib/spark/hive-site.xml"),
action="create_on_execute",
type = 'file',
mode=0444,
owner=params.oozie_user,
group=params.user_group,
source=format("{hive_conf_dir}/hive-site.xml"),
)
params.HdfsResource(None, action="execute")
hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
# check with webhdfs is much faster than executing hadoop fs -ls.
util = WebHDFSUtil(params.hdfs_site, params.oozie_user, params.security_enabled)
list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
hdfs_share_dir_exists = ('FileStatus' in list_status)
else:
# have to do time expensive hadoop fs -ls check.
hdfs_share_dir_exists = shell.call(format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls {hdfs_share_dir} | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
user=params.oozie_user)[0]
if not hdfs_share_dir_exists:
Execute( params.put_shared_lib_to_hdfs_cmd,
user = params.oozie_user,
path = params.execute_path
)
params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
type="directory",
action="create_on_execute",
mode=0755,
recursive_chmod=True,
)
params.HdfsResource(None, action="execute")
try:
# start oozie
Execute( start_cmd, environment=environment, user = params.oozie_user,
not_if = no_op_test )
copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type, params.upgrade_direction)
except:
show_logs(params.oozie_log_dir, params.oozie_user)
raise
elif action == 'stop':
Directory(params.oozie_tmp_dir,
owner=params.oozie_user,
create_parents = True,
)
stop_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozied.sh stop 60 -force")
try:
# stop oozie
Execute(stop_cmd, environment=environment, only_if = no_op_test,
user = params.oozie_user)
except:
show_logs(params.oozie_log_dir, params.oozie_user)
raise
File(params.pid_file, action = "delete")
| 45.336842 | 270 | 0.678895 |
4a229b60350211dcb0766c487eae4d60f5f634b3 | 448 | py | Python | Kattis (Tae Ho Kim)/Transit Woes/Answer.py | kim4t/Kattis | 67a0aef92d65f5f9294e9a3338ef68e38e697c1c | [
"Unlicense"
] | null | null | null | Kattis (Tae Ho Kim)/Transit Woes/Answer.py | kim4t/Kattis | 67a0aef92d65f5f9294e9a3338ef68e38e697c1c | [
"Unlicense"
] | null | null | null | Kattis (Tae Ho Kim)/Transit Woes/Answer.py | kim4t/Kattis | 67a0aef92d65f5f9294e9a3338ef68e38e697c1c | [
"Unlicense"
] | null | null | null | s,t,n = map(int,input().split())
d = input().split()
b = input().split()
c = input().split()
total = 0
for i in d:
total += int(i)
for i in b:
total += int(i)
sum = int(d[0])
stop = [sum]
for i in range(n-1):
sum+= int(d[i])+int(b[i])
stop.append(sum)
for i in range(n):
x = stop[i]
y = int(c[i])
dif = 0
if x<y:
dif = y-x
else: dif = y-(x%y)
total+= dif
if total<=t : print('yes')
else: print('no') | 17.92 | 32 | 0.504464 |
4a229cebf1fde8dbb7687f304b9e64def06dbff8 | 5,993 | py | Python | tools/auth.py | GeorgeGreenawayShow/messaging | 0535c12fba2f02a6ff78721de2bf2d92abec7243 | [
"MIT"
] | 1 | 2020-03-29T12:34:05.000Z | 2020-03-29T12:34:05.000Z | tools/auth.py | GeorgeGreenawayShow/messaging | 0535c12fba2f02a6ff78721de2bf2d92abec7243 | [
"MIT"
] | 1 | 2021-08-31T00:30:56.000Z | 2021-08-31T00:30:56.000Z | tools/auth.py | GeorgeGreenawayShow/messaging | 0535c12fba2f02a6ff78721de2bf2d92abec7243 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import fire
import requests
import config
import os
def read_token_file():
if os.path.exists("token.txt"):
with open("token.txt", "r+") as f:
token = f.read()
if "RST-" in token:
print("🔑❗ Logged in with a password reset token, most commands won't work. Login as another user or set a new password.")
return token
else:
print("😢 No token file found, please login with [login]")
return False
def user_readout(user):
print(f"🎉 Hi '{user['username']}' - Your session token is valid!")
print(f"ℹ This session expires at {user['token_expires']}")
if user['username'] == "setup":
print("‼ Caution, you're logged in as the setup account, it's highly recommended you make another account and delete this one.")
def print_user(user):
print(f"🧑 Username: {user['username']}")
print(f"🏠 Logged in: {user['logged_in']}")
if "session_expires" in user:
print(f"⌚ Session expires at: {user['session_expires']}")
if user['avatar']:
print(f"🖼 Avatar: {user['avatar']}")
if "reset_required" in user:
print("❗ User must change password at next login.")
print("\n")
class Commands:
def login(self, username="setup"):
"""Login to Auth server (uses setup account by default)"""
print(f"🔑 Logging in as {username}")
password = input("🔑 Password (default: password): ")
r = requests.post(f"{config.auth_server}/api/auth/login", json={
"username": username,
"password": password
})
if r.status_code == 200:
print("🎉 Successfully logged in! Writing token to file.")
token = r.json()['token']
if "RST-" in token:
print("❗ Logged in with a password reset token, most commands won't work. Use auth password to set a new password.")
with open("token.txt", "w+") as f:
f.write(token)
elif r.status_code == 401:
print(f"❌ Unauthorised: {r.json()}")
else:
print(f"😢 Communication error: {r.status_code}")
def check(self):
"""Checks if the token file is valid"""
token = read_token_file()
if token:
r = requests.get(f"{config.auth_server}/api/auth", headers={"Authorization": token})
if r.status_code == 200:
print("🎉 The session token is valid!")
data = r.json()
user_readout(data)
else:
print("😭 Invalid token, login again with [login]")
def logout(self, user="self"):
"""Logout user, 'self' to logout own account"""
token = read_token_file()
if token and user == "self":
r = requests.delete(f"{config.auth_server}/api/auth", headers={"authorization": token})
os.remove("token.txt")
if r.status_code == 204:
print("👋 Logged out, bye!")
else:
print("‼ Un-graceful logout, deleting file.")
else:
r = requests.delete(f"{config.auth_server}/api/user/{user}/session", headers={"authorization": token})
if r.status_code == 204:
print(f"👋 Logged out {user}")
else:
print("‼ Failed to logout.")
def users(self):
"""Get a list of all users"""
token = read_token_file()
if token:
r = requests.get(f"{config.auth_server}/api/users", headers={"authorization": token})
if r.status_code == 200:
users = r.json()
for user in users:
print_user(user)
def create(self, user, avatar=""):
"""Create a new user"""
token = read_token_file()
if token:
password = input("🔑 Password for user (blank for auto): ")
if password != "":
obj = {"username": user, "password": password, "avatar": avatar}
else:
obj = {"username": user, "avatar": avatar}
r = requests.put(f"{config.auth_server}/api/user", json=obj, headers={"authorization": token})
if r.status_code == 204:
print(f"✅ Created new user: {user}")
elif r.status_code == 200:
print(f"✅ Created new user: {user}, temporary password: {r.json()['password']}")
elif r.status_code == 409:
                print(f"😒 User already exists")
else:
print("Auth issue.")
def delete(self, user):
"""Delete a user"""
token = read_token_file()
if token:
r = requests.delete(f"{config.auth_server}/api/user/{user}", headers={"authorization": token})
if r.status_code == 204:
print(f"🗑 User {user} deleted.")
elif r.status_code == 404:
print("❌ Invalid user.")
else:
print("Auth issue.")
def password(self, username):
"""Set a new password"""
token = read_token_file()
if token:
password = input("🔑 New password (blank for auto): ")
if password == "":
obj = {"user": username}
else:
obj = {"user": username, "password": password}
r = requests.post(f"{config.auth_server}/api/user/reset", json=obj, headers={"authorization": token})
if r.status_code == 204:
print("🎉 Password updated.")
elif r.status_code == 200:
print(f"🎉 Password updated. (🔑 temporary password: {r.json()['password']})")
elif r.status_code == 401:
print(f"❗ Invalid username/password or you're trying to change a password of another user while using a password-reset token.")
else:
print(f"😢 Unknown communication error: {r.status_code} {r.text}")
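# Typical CLI usage via python-fire (sketch; assumes config.auth_server points at a
# running instance of the auth API):
#   python auth.py login --username=admin      # writes token.txt on success
#   python auth.py check                        # validates the stored session token
#   python auth.py create alice                 # creates a user (prompts for a password)
#   python auth.py password alice               # sets or resets a user's password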
if __name__ == "__main__":
fire.Fire(Commands)
| 39.688742 | 143 | 0.542466 |
4a229cfb509c4568da640a057c48ca45e39ebc37 | 31 | py | Python | torch/ao/nn/__init__.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | torch/ao/nn/__init__.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | torch/ao/nn/__init__.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 19,210 | 2017-01-18T17:45:04.000Z | 2022-03-31T23:51:56.000Z | from torch.ao.nn import sparse
| 15.5 | 30 | 0.806452 |
4a229d1b9783e8d75ed7b0e1456a652fc7a12cc4 | 28,055 | py | Python | wokkel/test/test_disco.py | dustin/wokkel | 2dae171b77adb790fda54e5f66e4bfc40c3b28ac | [
"MIT"
] | 1 | 2015-11-04T10:33:54.000Z | 2015-11-04T10:33:54.000Z | wokkel/test/test_disco.py | dustin/wokkel | 2dae171b77adb790fda54e5f66e4bfc40c3b28ac | [
"MIT"
] | null | null | null | wokkel/test/test_disco.py | dustin/wokkel | 2dae171b77adb790fda54e5f66e4bfc40c3b28ac | [
"MIT"
] | null | null | null | # Copyright (c) 2003-2009 Ralph Meijer
# See LICENSE for details.
"""
Tests for L{wokkel.disco}.
"""
from zope.interface import implements
from twisted.internet import defer
from twisted.trial import unittest
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber.xmlstream import toResponse
from twisted.words.xish import domish
from wokkel import data_form, disco
from wokkel.generic import parseXml
from wokkel.subprotocols import XMPPHandler
from wokkel.test.helpers import TestableRequestHandlerMixin, XmlStreamStub
NS_DISCO_INFO = 'http://jabber.org/protocol/disco#info'
NS_DISCO_ITEMS = 'http://jabber.org/protocol/disco#items'
class DiscoFeatureTest(unittest.TestCase):
"""
Tests for L{disco.DiscoFeature}.
"""
def test_init(self):
"""
Test initialization with a with feature namespace URI.
"""
feature = disco.DiscoFeature(u'testns')
self.assertEqual(u'testns', feature)
def test_toElement(self):
"""
Test proper rendering to a DOM representation.
The returned element should be properly named and have a C{var}
attribute that holds the feature namespace URI.
"""
feature = disco.DiscoFeature(u'testns')
element = feature.toElement()
self.assertEqual(NS_DISCO_INFO, element.uri)
self.assertEqual(u'feature', element.name)
self.assertTrue(element.hasAttribute(u'var'))
self.assertEqual(u'testns', element[u'var'])
def test_fromElement(self):
"""
Test creating L{disco.DiscoFeature} from L{domish.Element}.
"""
element = domish.Element((NS_DISCO_INFO, u'feature'))
element['var'] = u'testns'
feature = disco.DiscoFeature.fromElement(element)
self.assertEqual(u'testns', feature)
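# For orientation, the wire format exercised by the tests above is (sketch):
#   <feature xmlns='http://jabber.org/protocol/disco#info' var='testns'/>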
class DiscoIdentityTest(unittest.TestCase):
"""
Tests for L{disco.DiscoIdentity}.
"""
def test_init(self):
"""
Test initialization with a category, type and name.
"""
identity = disco.DiscoIdentity(u'conference', u'text', u'The chatroom')
self.assertEqual(u'conference', identity.category)
self.assertEqual(u'text', identity.type)
self.assertEqual(u'The chatroom', identity.name)
def test_toElement(self):
"""
Test proper rendering to a DOM representation.
        The returned element should be properly named and have C{category},
        C{type}, and C{name} attributes.
"""
identity = disco.DiscoIdentity(u'conference', u'text', u'The chatroom')
element = identity.toElement()
self.assertEqual(NS_DISCO_INFO, element.uri)
self.assertEqual(u'identity', element.name)
self.assertEqual(u'conference', element.getAttribute(u'category'))
self.assertEqual(u'text', element.getAttribute(u'type'))
self.assertEqual(u'The chatroom', element.getAttribute(u'name'))
def test_toElementWithoutName(self):
"""
Test proper rendering to a DOM representation without a name.
        The returned element should be properly named and have C{category} and
        C{type} attributes, but no C{name} attribute.
"""
identity = disco.DiscoIdentity(u'conference', u'text')
element = identity.toElement()
self.assertEqual(NS_DISCO_INFO, element.uri)
self.assertEqual(u'identity', element.name)
self.assertEqual(u'conference', element.getAttribute(u'category'))
self.assertEqual(u'text', element.getAttribute(u'type'))
self.assertFalse(element.hasAttribute(u'name'))
def test_fromElement(self):
"""
Test creating L{disco.DiscoIdentity} from L{domish.Element}.
"""
element = domish.Element((NS_DISCO_INFO, u'identity'))
element['category'] = u'conference'
element['type'] = u'text'
element['name'] = u'The chatroom'
identity = disco.DiscoIdentity.fromElement(element)
self.assertEqual(u'conference', identity.category)
self.assertEqual(u'text', identity.type)
self.assertEqual(u'The chatroom', identity.name)
def test_fromElementWithoutName(self):
"""
Test creating L{disco.DiscoIdentity} from L{domish.Element}, no name.
"""
element = domish.Element((NS_DISCO_INFO, u'identity'))
element['category'] = u'conference'
element['type'] = u'text'
identity = disco.DiscoIdentity.fromElement(element)
self.assertEqual(u'conference', identity.category)
self.assertEqual(u'text', identity.type)
self.assertEqual(None, identity.name)
class DiscoInfoTest(unittest.TestCase):
"""
Tests for L{disco.DiscoInfo}.
"""
def test_toElement(self):
"""
Test C{toElement} creates a correctly namespaced element, no node.
"""
info = disco.DiscoInfo()
element = info.toElement()
self.assertEqual(NS_DISCO_INFO, element.uri)
self.assertEqual(u'query', element.name)
self.assertFalse(element.hasAttribute(u'node'))
def test_toElementNode(self):
"""
Test C{toElement} with a node.
"""
info = disco.DiscoInfo()
info.nodeIdentifier = u'test'
element = info.toElement()
self.assertEqual(u'test', element.getAttribute(u'node'))
def test_toElementChildren(self):
"""
Test C{toElement} creates a DOM with proper childs.
"""
info = disco.DiscoInfo()
info.append(disco.DiscoFeature(u'jabber:iq:register'))
info.append(disco.DiscoIdentity(u'conference', u'text'))
info.append(data_form.Form(u'result'))
element = info.toElement()
featureElements = domish.generateElementsQNamed(element.children,
u'feature',
NS_DISCO_INFO)
self.assertEqual(1, len(list(featureElements)))
identityElements = domish.generateElementsQNamed(element.children,
u'identity',
NS_DISCO_INFO)
self.assertEqual(1, len(list(identityElements)))
extensionElements = domish.generateElementsQNamed(element.children,
u'x',
data_form.NS_X_DATA)
self.assertEqual(1, len(list(extensionElements)))
def test_fromElement(self):
"""
Test properties when creating L{disco.DiscoInfo} from L{domish.Element}.
"""
xml = """<query xmlns='http://jabber.org/protocol/disco#info'>
<identity category='conference'
type='text'
name='A Dark Cave'/>
<feature var='http://jabber.org/protocol/muc'/>
<feature var='jabber:iq:register'/>
<x xmlns='jabber:x:data' type='result'>
<field var='FORM_TYPE' type='hidden'>
<value>http://jabber.org/protocol/muc#roominfo</value>
</field>
</x>
</query>"""
element = parseXml(xml)
info = disco.DiscoInfo.fromElement(element)
self.assertIn(u'http://jabber.org/protocol/muc', info.features)
self.assertIn(u'jabber:iq:register', info.features)
self.assertIn((u'conference', u'text'), info.identities)
self.assertEqual(u'A Dark Cave',
info.identities[(u'conference', u'text')])
self.assertIn(u'http://jabber.org/protocol/muc#roominfo',
info.extensions)
def test_fromElementItems(self):
"""
Test items when creating L{disco.DiscoInfo} from L{domish.Element}.
"""
xml = """<query xmlns='http://jabber.org/protocol/disco#info'>
<identity category='conference'
type='text'
name='A Dark Cave'/>
<feature var='http://jabber.org/protocol/muc'/>
<feature var='jabber:iq:register'/>
<x xmlns='jabber:x:data' type='result'>
<field var='FORM_TYPE' type='hidden'>
<value>http://jabber.org/protocol/muc#roominfo</value>
</field>
</x>
</query>"""
element = parseXml(xml)
info = disco.DiscoInfo.fromElement(element)
info = list(info)
self.assertEqual(4, len(info))
identity = info[0]
self.assertEqual(u'conference', identity.category)
self.assertEqual(u'http://jabber.org/protocol/muc', info[1])
self.assertEqual(u'jabber:iq:register', info[2])
extension = info[3]
self.assertEqual(u'http://jabber.org/protocol/muc#roominfo',
extension.formNamespace)
def test_fromElementNoNode(self):
"""
Test creating L{disco.DiscoInfo} from L{domish.Element}, no node.
"""
xml = """<query xmlns='http://jabber.org/protocol/disco#info'/>"""
element = parseXml(xml)
info = disco.DiscoInfo.fromElement(element)
self.assertEqual(u'', info.nodeIdentifier)
def test_fromElementNode(self):
"""
Test creating L{disco.DiscoInfo} from L{domish.Element}, with node.
"""
xml = """<query xmlns='http://jabber.org/protocol/disco#info'
node='test'>
</query>"""
element = parseXml(xml)
info = disco.DiscoInfo.fromElement(element)
self.assertEqual(u'test', info.nodeIdentifier)
class DiscoItemTest(unittest.TestCase):
"""
Tests for L{disco.DiscoItem}.
"""
def test_init(self):
"""
Test initialization with a category, type and name.
"""
item = disco.DiscoItem(JID(u'example.org'), u'test', u'The node')
self.assertEqual(JID(u'example.org'), item.entity)
self.assertEqual(u'test', item.nodeIdentifier)
self.assertEqual(u'The node', item.name)
def test_toElement(self):
"""
Test proper rendering to a DOM representation.
The returned element should be properly named and have C{jid}, C{node},
and C{name} attributes.
"""
item = disco.DiscoItem(JID(u'example.org'), u'test', u'The node')
element = item.toElement()
self.assertEqual(NS_DISCO_ITEMS, element.uri)
self.assertEqual(u'item', element.name)
self.assertEqual(u'example.org', element.getAttribute(u'jid'))
self.assertEqual(u'test', element.getAttribute(u'node'))
self.assertEqual(u'The node', element.getAttribute(u'name'))
def test_toElementWithoutName(self):
"""
Test proper rendering to a DOM representation without a name.
The returned element should be properly named and have C{jid}, C{node}
attributes, no C{name} attribute.
"""
item = disco.DiscoItem(JID(u'example.org'), u'test')
element = item.toElement()
self.assertEqual(NS_DISCO_ITEMS, element.uri)
self.assertEqual(u'item', element.name)
self.assertEqual(u'example.org', element.getAttribute(u'jid'))
self.assertEqual(u'test', element.getAttribute(u'node'))
self.assertFalse(element.hasAttribute(u'name'))
def test_fromElement(self):
"""
Test creating L{disco.DiscoItem} from L{domish.Element}.
"""
element = domish.Element((NS_DISCO_ITEMS, u'item'))
element[u'jid'] = u'example.org'
element[u'node'] = u'test'
element[u'name'] = u'The node'
item = disco.DiscoItem.fromElement(element)
self.assertEqual(JID(u'example.org'), item.entity)
self.assertEqual(u'test', item.nodeIdentifier)
self.assertEqual(u'The node', item.name)
def test_fromElementNoNode(self):
"""
Test creating L{disco.DiscoItem} from L{domish.Element}, no node.
"""
element = domish.Element((NS_DISCO_ITEMS, u'item'))
element[u'jid'] = u'example.org'
element[u'name'] = u'The node'
item = disco.DiscoItem.fromElement(element)
self.assertEqual(JID(u'example.org'), item.entity)
self.assertEqual(u'', item.nodeIdentifier)
self.assertEqual(u'The node', item.name)
def test_fromElementNoName(self):
"""
Test creating L{disco.DiscoItem} from L{domish.Element}, no name.
"""
element = domish.Element((NS_DISCO_ITEMS, u'item'))
element[u'jid'] = u'example.org'
element[u'node'] = u'test'
item = disco.DiscoItem.fromElement(element)
self.assertEqual(JID(u'example.org'), item.entity)
self.assertEqual(u'test', item.nodeIdentifier)
self.assertEqual(None, item.name)
def test_fromElementBadJID(self):
"""
Test creating L{disco.DiscoItem} from L{domish.Element}, bad JID.
"""
element = domish.Element((NS_DISCO_ITEMS, u'item'))
element[u'jid'] = u'ex@@@ample.org'
item = disco.DiscoItem.fromElement(element)
self.assertIdentical(None, item.entity)
class DiscoItemsTest(unittest.TestCase):
"""
Tests for L{disco.DiscoItems}.
"""
def test_toElement(self):
"""
Test C{toElement} creates a correctly namespaced element, no node.
"""
items = disco.DiscoItems()
element = items.toElement()
self.assertEqual(NS_DISCO_ITEMS, element.uri)
self.assertEqual(u'query', element.name)
self.assertFalse(element.hasAttribute(u'node'))
def test_toElementNode(self):
"""
Test C{toElement} with a node.
"""
items = disco.DiscoItems()
items.nodeIdentifier = u'test'
element = items.toElement()
self.assertEqual(u'test', element.getAttribute(u'node'))
def test_toElementChildren(self):
"""
Test C{toElement} creates a DOM with proper childs.
"""
items = disco.DiscoItems()
items.append(disco.DiscoItem(JID(u'example.org'), u'test', u'A node'))
element = items.toElement()
itemElements = domish.generateElementsQNamed(element.children,
u'item',
NS_DISCO_ITEMS)
self.assertEqual(1, len(list(itemElements)))
def test_fromElement(self):
"""
Test creating L{disco.DiscoItems} from L{domish.Element}.
"""
xml = """<query xmlns='http://jabber.org/protocol/disco#items'>
<item jid='example.org' node='test' name='A node'/>
</query>"""
element = parseXml(xml)
items = disco.DiscoItems.fromElement(element)
items = list(items)
self.assertEqual(1, len(items))
item = items[0]
self.assertEqual(JID(u'example.org'), item.entity)
self.assertEqual(u'test', item.nodeIdentifier)
self.assertEqual(u'A node', item.name)
def test_fromElementNoNode(self):
"""
Test creating L{disco.DiscoItems} from L{domish.Element}, no node.
"""
xml = """<query xmlns='http://jabber.org/protocol/disco#items'/>"""
element = parseXml(xml)
items = disco.DiscoItems.fromElement(element)
self.assertEqual(u'', items.nodeIdentifier)
def test_fromElementNode(self):
"""
Test creating L{disco.DiscoItems} from L{domish.Element}, with node.
"""
xml = """<query xmlns='http://jabber.org/protocol/disco#items'
node='test'>
</query>"""
element = parseXml(xml)
items = disco.DiscoItems.fromElement(element)
self.assertEqual(u'test', items.nodeIdentifier)
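# For orientation, a disco#items payload as round-tripped above looks like (sketch):
#   <query xmlns='http://jabber.org/protocol/disco#items' node='test'>
#     <item jid='example.org' node='test' name='A node'/>
#   </query>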
class DiscoClientProtocolTest(unittest.TestCase):
"""
Tests for L{disco.DiscoClientProtocol}.
"""
def setUp(self):
"""
Set up stub and protocol for testing.
"""
self.stub = XmlStreamStub()
self.protocol = disco.DiscoClientProtocol()
self.protocol.xmlstream = self.stub.xmlstream
self.protocol.connectionInitialized()
def test_requestItems(self):
"""
Test request sent out by C{requestItems} and parsing of response.
"""
def cb(items):
items = list(items)
self.assertEqual(2, len(items))
self.assertEqual(JID(u'test.example.org'), items[0].entity)
d = self.protocol.requestItems(JID(u'example.org'),u"foo")
d.addCallback(cb)
iq = self.stub.output[-1]
self.assertEqual(u'example.org', iq.getAttribute(u'to'))
self.assertEqual(u'get', iq.getAttribute(u'type'))
self.assertEqual(u'foo', iq.query.getAttribute(u'node'))
self.assertEqual(NS_DISCO_ITEMS, iq.query.uri)
response = toResponse(iq, u'result')
query = response.addElement((NS_DISCO_ITEMS, u'query'))
element = query.addElement(u'item')
element[u'jid'] = u'test.example.org'
element[u'node'] = u'music'
element[u'name'] = u'Music from the time of Shakespeare'
element = query.addElement(u'item')
element[u'jid'] = u"test2.example.org"
self.stub.send(response)
return d
def test_requestItemsFrom(self):
"""
A disco items request can be sent with an explicit sender address.
"""
d = self.protocol.requestItems(JID(u'example.org'),
sender=JID(u'test.example.org'))
iq = self.stub.output[-1]
self.assertEqual(u'test.example.org', iq.getAttribute(u'from'))
response = toResponse(iq, u'result')
response.addElement((NS_DISCO_ITEMS, u'query'))
self.stub.send(response)
return d
def test_requestInfo(self):
"""
Test request sent out by C{requestInfo} and parsing of response.
"""
def cb(info):
self.assertIn((u'conference', u'text'), info.identities)
self.assertIn(u'http://jabber.org/protocol/disco#info',
info.features)
self.assertIn(u'http://jabber.org/protocol/muc',
info.features)
d = self.protocol.requestInfo(JID(u'example.org'),'foo')
d.addCallback(cb)
iq = self.stub.output[-1]
self.assertEqual(u'example.org', iq.getAttribute(u'to'))
self.assertEqual(u'get', iq.getAttribute(u'type'))
self.assertEqual(u'foo', iq.query.getAttribute(u'node'))
self.assertEqual(NS_DISCO_INFO, iq.query.uri)
response = toResponse(iq, u'result')
query = response.addElement((NS_DISCO_INFO, u'query'))
element = query.addElement(u"identity")
element[u'category'] = u'conference' # required
element[u'type'] = u'text' # required
element[u"name"] = u'Romeo and Juliet, Act II, Scene II' # optional
element = query.addElement("feature")
element[u'var'] = u'http://jabber.org/protocol/disco#info' # required
element = query.addElement(u"feature")
element[u'var'] = u'http://jabber.org/protocol/muc'
self.stub.send(response)
return d
def test_requestInfoFrom(self):
"""
A disco info request can be sent with an explicit sender address.
"""
d = self.protocol.requestInfo(JID(u'example.org'),
sender=JID(u'test.example.org'))
iq = self.stub.output[-1]
self.assertEqual(u'test.example.org', iq.getAttribute(u'from'))
response = toResponse(iq, u'result')
response.addElement((NS_DISCO_INFO, u'query'))
self.stub.send(response)
return d
class DiscoHandlerTest(unittest.TestCase, TestableRequestHandlerMixin):
"""
Tests for L{disco.DiscoHandler}.
"""
def setUp(self):
self.service = disco.DiscoHandler()
def test_onDiscoInfo(self):
"""
C{onDiscoInfo} should process an info request and return a response.
The request should be parsed, C{info} called with the extracted
parameters, and then the result should be formatted into a proper
response element.
"""
xml = """<iq from='[email protected]' to='example.com'
type='get'>
<query xmlns='%s'/>
</iq>""" % NS_DISCO_INFO
def cb(element):
self.assertEqual('query', element.name)
self.assertEqual(NS_DISCO_INFO, element.uri)
self.assertEqual(NS_DISCO_INFO, element.identity.uri)
self.assertEqual('dummy', element.identity['category'])
self.assertEqual('generic', element.identity['type'])
self.assertEqual('Generic Dummy Entity', element.identity['name'])
self.assertEqual(NS_DISCO_INFO, element.feature.uri)
self.assertEqual('jabber:iq:version', element.feature['var'])
def info(requestor, target, nodeIdentifier):
self.assertEqual(JID('[email protected]'), requestor)
self.assertEqual(JID('example.com'), target)
self.assertEqual('', nodeIdentifier)
return defer.succeed([
disco.DiscoIdentity('dummy', 'generic', 'Generic Dummy Entity'),
disco.DiscoFeature('jabber:iq:version')
])
self.service.info = info
d = self.handleRequest(xml)
d.addCallback(cb)
return d
def test_onDiscoInfoWithNode(self):
"""
An info request for a node should return it in the response.
"""
xml = """<iq from='[email protected]' to='example.com'
type='get'>
<query xmlns='%s' node='test'/>
</iq>""" % NS_DISCO_INFO
def cb(element):
self.assertTrue(element.hasAttribute('node'))
self.assertEqual('test', element['node'])
def info(requestor, target, nodeIdentifier):
self.assertEqual('test', nodeIdentifier)
return defer.succeed([
disco.DiscoFeature('jabber:iq:version')
])
self.service.info = info
d = self.handleRequest(xml)
d.addCallback(cb)
return d
def test_onDiscoItems(self):
"""
C{onDiscoItems} should process an items request and return a response.
The request should be parsed, C{items} called with the extracted
parameters, and then the result should be formatted into a proper
response element.
"""
xml = """<iq from='[email protected]' to='example.com'
type='get'>
<query xmlns='%s'/>
</iq>""" % NS_DISCO_ITEMS
def cb(element):
self.assertEqual('query', element.name)
self.assertEqual(NS_DISCO_ITEMS, element.uri)
self.assertEqual(NS_DISCO_ITEMS, element.item.uri)
self.assertEqual('example.com', element.item['jid'])
self.assertEqual('test', element.item['node'])
self.assertEqual('Test node', element.item['name'])
def items(requestor, target, nodeIdentifier):
self.assertEqual(JID('[email protected]'), requestor)
self.assertEqual(JID('example.com'), target)
self.assertEqual('', nodeIdentifier)
return defer.succeed([
disco.DiscoItem(JID('example.com'), 'test', 'Test node'),
])
self.service.items = items
d = self.handleRequest(xml)
d.addCallback(cb)
return d
def test_onDiscoItemsWithNode(self):
"""
An items request for a node should return it in the response.
"""
xml = """<iq from='[email protected]' to='example.com'
type='get'>
<query xmlns='%s' node='test'/>
</iq>""" % NS_DISCO_ITEMS
def cb(element):
self.assertTrue(element.hasAttribute('node'))
self.assertEqual('test', element['node'])
def items(requestor, target, nodeIdentifier):
self.assertEqual('test', nodeIdentifier)
return defer.succeed([
disco.DiscoFeature('jabber:iq:version')
])
self.service.items = items
d = self.handleRequest(xml)
d.addCallback(cb)
return d
def test_info(self):
"""
C{info} should gather disco info from sibling handlers.
"""
discoItems = [disco.DiscoIdentity('dummy', 'generic',
'Generic Dummy Entity'),
disco.DiscoFeature('jabber:iq:version')
]
class DiscoResponder(XMPPHandler):
implements(disco.IDisco)
def getDiscoInfo(self, requestor, target, nodeIdentifier):
if not nodeIdentifier:
return defer.succeed(discoItems)
else:
return defer.succeed([])
def cb(result):
self.assertEquals(discoItems, result)
self.service.parent = [self.service, DiscoResponder()]
d = self.service.info(JID('[email protected]'), JID('example.com'), '')
d.addCallback(cb)
return d
def test_infoNotDeferred(self):
"""
        C{info} should also collect results not returned via a deferred.
"""
discoItems = [disco.DiscoIdentity('dummy', 'generic',
'Generic Dummy Entity'),
disco.DiscoFeature('jabber:iq:version')
]
class DiscoResponder(XMPPHandler):
implements(disco.IDisco)
def getDiscoInfo(self, requestor, target, nodeIdentifier):
if not nodeIdentifier:
return discoItems
else:
return []
def cb(result):
self.assertEquals(discoItems, result)
self.service.parent = [self.service, DiscoResponder()]
d = self.service.info(JID('[email protected]'), JID('example.com'), '')
d.addCallback(cb)
return d
def test_items(self):
"""
        C{items} should gather disco items from sibling handlers.
"""
discoItems = [disco.DiscoItem(JID('example.com'), 'test', 'Test node')]
class DiscoResponder(XMPPHandler):
implements(disco.IDisco)
def getDiscoItems(self, requestor, target, nodeIdentifier):
if not nodeIdentifier:
return defer.succeed(discoItems)
else:
return defer.succeed([])
def cb(result):
self.assertEquals(discoItems, result)
self.service.parent = [self.service, DiscoResponder()]
d = self.service.items(JID('[email protected]'), JID('example.com'), '')
d.addCallback(cb)
return d
def test_itemsNotDeferred(self):
"""
        C{items} should also collect results not returned via a deferred.
"""
discoItems = [disco.DiscoItem(JID('example.com'), 'test', 'Test node')]
class DiscoResponder(XMPPHandler):
implements(disco.IDisco)
def getDiscoItems(self, requestor, target, nodeIdentifier):
if not nodeIdentifier:
return discoItems
else:
return []
def cb(result):
self.assertEquals(discoItems, result)
self.service.parent = [self.service, DiscoResponder()]
d = self.service.items(JID('[email protected]'), JID('example.com'), '')
d.addCallback(cb)
return d
| 33.964891 | 80 | 0.584744 |
4a229f250e888af5f3c7e5b3743009e60bfb7753 | 5,405 | py | Python | config.py | KaihuaTang/VQA2.0-Recent-Approachs-2018.pytorch | 52e1ba5a7f3b88c617115ccc755e2e7868e8de2b | [
"MIT"
] | 298 | 2019-01-19T05:44:29.000Z | 2022-03-25T14:42:16.000Z | config.py | KaihuaTang/VQA2.0-Recent-Approachs-2018.pytorch | 52e1ba5a7f3b88c617115ccc755e2e7868e8de2b | [
"MIT"
] | 10 | 2019-02-10T06:10:29.000Z | 2021-05-19T13:49:33.000Z | config.py | KaihuaTang/VQA2.0-Recent-Approachs-2018.pytorch | 52e1ba5a7f3b88c617115ccc755e2e7868e8de2b | [
"MIT"
] | 54 | 2019-01-19T05:45:01.000Z | 2022-01-18T07:40:33.000Z | # paths
qa_path = 'data' # directory containing the question and annotation jsons
bottom_up_trainval_path = 'data/trainval' # directory containing the .tsv file(s) with bottom up features
bottom_up_test_path = 'data/test2015' # directory containing the .tsv file(s) with bottom up features
preprocessed_trainval_path = 'genome-trainval.h5' # path where preprocessed features from the trainval split are saved to and loaded from
preprocessed_test_path = 'genome-test.h5' # path where preprocessed features from the test split are saved to and loaded from
vocabulary_path = 'vocab.json' # path where the used vocabularies for question and answers are saved to
glove_index = 'data/dictionary.pkl'
result_json_path = 'results.json' # the path to save the test json that can be uploaded to vqa2.0 online evaluation server
task = 'OpenEnded'
dataset = 'mscoco'
test_split = 'test2015' # always 'test2015': since 2018 the VQA online evaluation server requires uploading the entire test2015 result even for the test-dev split
# preprocess config
output_size = 100 # max number of object proposals per image
output_features = 2048 # number of features in each object proposal
###################################################################
# Default Setting for All Model
###################################################################
# training config
epochs = 100
batch_size = 256
initial_lr = 1e-3
lr_decay_step = 2
lr_decay_rate = 0.25
lr_halflife = 50000 # for scheduler (counting)
data_workers = 4
max_answers = 3129
max_q_length = 666 # question_length = min(max_q_length, max_length_in_dataset)
clip_value = 0.25
v_feat_norm = False # Only useful in learning to count
print_gradient = False
normalize_box = False
seed = 5225
weight_decay = 0.0
model_type = 'baseline' # "Bottom-up top-down"
#model_type = 'inter_intra' # "Intra- and Inter-modality Attention"
#model_type = 'ban' # "Bilinear Attention Network"
#model_type = 'counting' # "Learning to count objects"
#model_type = 'graph' # "Learning Conditioned Graph Structures for Interpretable Visual Question Answering"
optim_method = 'Adamax' # used in "Bottom-up top-down", "Bilinear Attention Network", "Intra- and Inter-modality Attention"
#optim_method = 'Adam' # used in "Learning to count objects", set initial_lr to 1.5e-3
schedule_method = 'warm_up'
#schedule_method = 'batch_decay'
loss_method = 'binary_cross_entropy_with_logits'
#loss_method = 'soft_cross_entropy'
#loss_method = 'KL_divergence'
#loss_method = 'multi_label_soft_margin'
gradual_warmup_steps = [1.0 * initial_lr, 1.0 * initial_lr, 2.0 * initial_lr, 2.0 * initial_lr]
lr_decay_epochs = range(10, 100, lr_decay_step)
###################################################################
# Detailed Setting for Each Model
###################################################################
# "Bottom-up top-down"
# baseline Setting
if model_type == 'baseline':
loss_method = 'binary_cross_entropy_with_logits'
gradual_warmup_steps = [0.5 * initial_lr, 1.0 * initial_lr, 1.5 * initial_lr, 2.0 * initial_lr]
# "Intra- and Inter-modality Attention"
# inter_intra setting
elif model_type == 'inter_intra':
lr_decay_step = 10
max_q_length = 14
loss_method = 'binary_cross_entropy_with_logits'
gradual_warmup_steps = [1.0 * initial_lr, 1.0 * initial_lr, 2.0 * initial_lr, 2.0 * initial_lr]
# "Bilinear Attention Network"
# ban setting
elif model_type == 'ban':
batch_size = 128
lr_decay_step = 2
max_q_length = 14
loss_method = 'binary_cross_entropy_with_logits'
gradual_warmup_steps = [0.5 * initial_lr, 1.0 * initial_lr, 1.5 * initial_lr, 2.0 * initial_lr]
# "Learning to count objects"
# counting setting
elif model_type == 'counting':
optim_method = 'Adam'
schedule_method = 'batch_decay'
v_feat_norm = True
loss_method = 'soft_cross_entropy'
# "Learning Conditioned Graph Structures for Interpretable Visual Question Answering"
# graph setting
elif model_type == 'graph':
initial_lr = 1e-4
lr_decay_step = 10
lr_decay_rate = 0.5
normalize_box = True
loss_method = 'multi_label_soft_margin'
gradual_warmup_steps = [1.0 * initial_lr, 1.0 * initial_lr, 2.0 * initial_lr, 2.0 * initial_lr]
lr_decay_epochs = range(30, 100, lr_decay_step)
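# Minimal usage sketch (assuming this module is imported as `config` by the
# training code, which is how the settings above are consumed):
#   import config
#   config.print_param()   # log the effective hyper-parameters for the chosen model_type
#   assert config.model_type in ('baseline', 'inter_intra', 'ban', 'counting', 'graph')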
def print_param():
print('--------------------------------------------------')
print('Num obj: ', output_size)
print('Num epochs: ', epochs)
print('Batch size: ', batch_size)
print('Model type: ', model_type)
print('Optimization Method: ', optim_method)
print('Schedule Method: ', schedule_method)
print('Loss Method: ', loss_method)
print('Clip Value: ', clip_value)
print('Init LR: ', initial_lr)
print('LR decay step: ', lr_decay_step)
print('LR decay rate: ', lr_decay_rate)
print('LR half life: ', lr_halflife)
print('Normalize visual feature: ', v_feat_norm)
print('Print Gradient: ', print_gradient)
print('Normalize Box Size: ', normalize_box)
print('Max answer choice: ', max_answers)
    print('Manually set max question length: ', max_q_length)
print('Random Seed: ', seed)
print('gradual_warmup_steps: ', gradual_warmup_steps)
print('Weight Decay: ', weight_decay)
print('--------------------------------------------------')
| 41.576923 | 156 | 0.670675 |
4a22a00afc40f3b0784428f2acf106d2ff9e00a0 | 5,387 | py | Python | pynodegl-utils/pynodegl_utils/examples/medias.py | mbouron/gopro-lib-node.gl | e888e889643be8e8e02c8b9f9159b393b4b54928 | [
"Apache-2.0"
] | null | null | null | pynodegl-utils/pynodegl_utils/examples/medias.py | mbouron/gopro-lib-node.gl | e888e889643be8e8e02c8b9f9159b393b4b54928 | [
"Apache-2.0"
] | null | null | null | pynodegl-utils/pynodegl_utils/examples/medias.py | mbouron/gopro-lib-node.gl | e888e889643be8e8e02c8b9f9159b393b4b54928 | [
"Apache-2.0"
] | null | null | null | import pynodegl as ngl
from pynodegl_utils.misc import scene
@scene(uv_corner=scene.Vector(n=2),
uv_width=scene.Vector(n=2),
uv_height=scene.Vector(n=2),
progress_bar=scene.Bool())
def centered_media(cfg, uv_corner=(0, 0), uv_width=(1, 0), uv_height=(0, 1), progress_bar=True):
'''A simple centered media with an optional progress bar in the shader'''
m0 = cfg.medias[0]
cfg.duration = m0.duration
cfg.aspect_ratio = (m0.width, m0.height)
q = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0), uv_corner, uv_width, uv_height)
m = ngl.Media(m0.filename)
t = ngl.Texture2D(data_src=m)
p = ngl.Program(vertex=cfg.get_vert('texture'), fragment=cfg.get_frag('texture'))
p.update_vert_out_vars(var_tex0_coord=ngl.IOVec2(), var_uvcoord=ngl.IOVec2())
render = ngl.Render(q, p)
render.update_frag_resources(tex0=t)
if progress_bar:
p.set_fragment(cfg.get_frag('progress-bar'))
media_duration = ngl.UniformFloat(m0.duration)
ar = ngl.UniformFloat(cfg.aspect_ratio_float)
render.update_frag_resources(media_duration=media_duration, ar=ar)
return render
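# For reference (sketch): the uv_* vectors select the sampled region of the texture;
# e.g. uv_width=(0.5, 0) together with uv_height=(0, 0.5) maps only a quarter of the
# media onto the quad instead of the full frame.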
@scene(speed=scene.Range(range=[0.01, 2], unit_base=1000))
def playback_speed(cfg, speed=1.0):
'''Adjust media playback speed using animation keyframes'''
m0 = cfg.medias[0]
media_duration = m0.duration
initial_seek = min(media_duration, 5)
rush_duration = media_duration - initial_seek
cfg.duration = rush_duration / speed
cfg.aspect_ratio = (m0.width, m0.height)
q = ngl.Quad((-0.5, -0.5, 0), (1, 0, 0), (0, 1, 0))
time_animkf = [ngl.AnimKeyFrameFloat(0, initial_seek),
ngl.AnimKeyFrameFloat(cfg.duration, media_duration)]
m = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(time_animkf))
t = ngl.Texture2D(data_src=m)
p = ngl.Program(vertex=cfg.get_vert('texture'), fragment=cfg.get_frag('texture'))
p.update_vert_out_vars(var_tex0_coord=ngl.IOVec2(), var_uvcoord=ngl.IOVec2())
render = ngl.Render(q, p)
render.update_frag_resources(tex0=t)
return render
@scene()
def time_remapping(cfg):
'''
Time remapping in the following order:
- nothing displayed for a while (but media prefetch happening in background)
- first frame displayed for a while
- normal playback
- last frame displayed for a while (even though the media is closed)
- nothing again until the end
'''
m0 = cfg.medias[0]
media_seek = 10
noop_duration = 2
prefetch_duration = 2
freeze_duration = 3
playback_duration = 5
range_start = noop_duration + prefetch_duration
play_start = range_start + freeze_duration
play_stop = play_start + playback_duration
range_stop = play_stop + freeze_duration
duration = range_stop + noop_duration
cfg.duration = duration
cfg.aspect_ratio = (m0.width, m0.height)
media_animkf = [
ngl.AnimKeyFrameFloat(play_start, media_seek),
ngl.AnimKeyFrameFloat(play_stop, media_seek + playback_duration),
]
q = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
m = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(media_animkf))
m.set_sxplayer_min_level('verbose')
t = ngl.Texture2D(data_src=m)
p = ngl.Program(vertex=cfg.get_vert('texture'), fragment=cfg.get_frag('texture'))
p.update_vert_out_vars(var_tex0_coord=ngl.IOVec2(), var_uvcoord=ngl.IOVec2())
r = ngl.Render(q, p)
r.update_frag_resources(tex0=t)
time_ranges = [
ngl.TimeRangeModeNoop(0),
ngl.TimeRangeModeCont(range_start),
ngl.TimeRangeModeNoop(range_stop),
]
rf = ngl.TimeRangeFilter(r, ranges=time_ranges, prefetch_time=prefetch_duration)
base_string = 'media time: %2g to %2g\nscene time: %2g to %2g\ntime range: %2g to %2g' % (
media_seek, media_seek + playback_duration, play_start, play_stop, range_start, range_stop)
text = ngl.Text(base_string,
box_height=(0, 0.3, 0),
box_corner=(-1, 1 - 0.3, 0),
aspect_ratio=cfg.aspect_ratio,
halign='left')
group = ngl.Group()
group.add_children(rf, text)
steps = (
('default color, nothing yet', 0, noop_duration),
('default color, media prefetched', noop_duration, range_start),
('first frame', range_start, play_start),
('normal playback', play_start, play_stop),
('last frame', play_stop, range_stop),
('default color, media released', range_stop, duration),
)
for i, (description, start_time, end_time) in enumerate(steps):
text = ngl.Text('%g to %g: %s' % (start_time, end_time, description),
aspect_ratio=cfg.aspect_ratio,
box_height=(0, 0.2, 0))
text_tr = (
ngl.TimeRangeModeNoop(0),
ngl.TimeRangeModeCont(start_time),
ngl.TimeRangeModeNoop(end_time),
)
text_rf = ngl.TimeRangeFilter(text, ranges=text_tr, label='text-step-%d' % i)
group.add_children(text_rf)
return ngl.GraphicConfig(group, blend=True,
blend_src_factor='src_alpha',
blend_dst_factor='one_minus_src_alpha',
blend_src_factor_a='zero',
blend_dst_factor_a='one')
| 38.755396 | 109 | 0.6447 |
4a22a034ec63bbade40e890ef81df38456827d78 | 5,419 | py | Python | src/logo_only.py | NitikaGupta16/logohunter | f57e987bbd37d6316e701688f99d6e1fea6d406b | [
"MIT"
] | 128 | 2019-02-27T08:47:03.000Z | 2022-02-25T04:23:48.000Z | src/logo_only.py | AntonMu/logohunter | 4c6eb0931b3c100d4ed03334e26166d2429d5722 | [
"MIT"
] | 10 | 2019-12-11T07:17:27.000Z | 2022-03-27T06:30:55.000Z | src/logo_only.py | AntonMu/logohunter | 4c6eb0931b3c100d4ed03334e26166d2429d5722 | [
"MIT"
] | 32 | 2019-06-25T20:34:46.000Z | 2022-03-21T12:29:30.000Z | """
Run generic logo detection on input images, without matching to a specific brand
"""
import argparse
from keras_yolo3.yolo import YOLO
import os
import sys
from timeit import default_timer as timer
from logos import detect_logo, match_logo, detect_video
import utils
from utils import parse_input
output_txt = 'out.txt'
FLAGS = None
if __name__ == '__main__':
# class YOLO defines the default value, so suppress any default here
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
'''
Command line options
'''
parser.add_argument(
'--image', default=False, action="store_true",
help='Image detection mode'
)
parser.add_argument(
'--video', default=False, action="store_true",
help='Video detection mode'
)
parser.add_argument(
"--input_images", type=str, default='input',
help = "path to image directory or video to find logos in"
)
parser.add_argument(
"--output", type=str, default="../data/test/",
help = "output path: either directory for single/batch image, or filename for video"
)
parser.add_argument(
"--outtxt", default=False, dest='save_to_txt', action="store_true",
help = "save text file with inference results"
)
parser.add_argument(
"--no_save_img", default=False, action="store_true",
help = "do not save output images with annotated boxes"
)
parser.add_argument(
'--yolo_model', type=str, dest='model_path', default = 'keras_yolo3/yolo_weights_logos.h5',
help='path to YOLO model weight file'
)
parser.add_argument(
'--anchors', type=str, dest='anchors_path', default = 'keras_yolo3/model_data/yolo_anchors.txt',
help='path to YOLO anchors'
)
parser.add_argument(
'--classes', type=str, dest='classes_path', default = 'data_classes.txt',
help='path to YOLO class specifications'
)
parser.add_argument(
'--gpu_num', type=int, default = 1,
help='Number of GPU to use'
)
parser.add_argument(
'--confidence', type=float, dest = 'score', default = 0.1,
help='YOLO object confidence threshold above which to show predictions'
)
FLAGS = parser.parse_args()
save_img_logo = not FLAGS.no_save_img
# define YOLO logo detector
yolo = YOLO(**{"model_path": FLAGS.model_path,
"anchors_path": FLAGS.anchors_path,
"classes_path": FLAGS.classes_path,
"score" : FLAGS.score,
"gpu_num" : FLAGS.gpu_num,
"model_image_size" : (416, 416),
}
)
# image detection mode
if FLAGS.image:
if FLAGS.input_images.endswith('.txt'):
print("Batch image detection mode: reading "+FLAGS.input_images)
output_txt = FLAGS.input_images.split('.txt')[0]+'_pred_logo.txt'
FLAGS.save_to_txt = True
with open(FLAGS.input_images, 'r') as file:
file_list = [line.split(' ')[0] for line in file.read().splitlines()]
FLAGS.input_images = [os.path.abspath(f) for f in file_list]
elif FLAGS.input_images == 'input':
print('Input images to be scanned for logos: (file-by-file or entire directory)')
FLAGS.input_images = parse_input()
elif os.path.isdir(FLAGS.input_images):
FLAGS.input_images = [ os.path.abspath(os.path.join(FLAGS.input_images, f)) for f in os.listdir(FLAGS.input_images) if f.endswith(('.jpg', '.png')) ]
elif os.path.isfile(FLAGS.input_images):
FLAGS.input_images = [ os.path.abspath(FLAGS.input_images) ]
else:
exit('Error: path not found: {}'.format(FLAGS.input_images))
start = timer()
# cycle trough input images, look for logos and then match them against inputs
text_out = ''
for i, img_path in enumerate(FLAGS.input_images):
text_out += (img_path+' ')
prediction, image = detect_logo(yolo, img_path, save_img = save_img_logo,
save_img_path = FLAGS.output,
postfix='_logo')
for pred in prediction:
text_out += ','.join([str(p) for p in pred])+' '
text_out += '\n'
if FLAGS.save_to_txt:
with open(output_txt,'w') as txtfile:
txtfile.write(text_out)
end = timer()
print('Processed {} images in {:.1f}sec - {:.1f}FPS'.format(
len(FLAGS.input_images), end-start, len(FLAGS.input_images)/(end-start)
))
# video detection mode
elif FLAGS.video:
if FLAGS.input_images == 'input':
print('Input video to be scanned for logos: enter one file')
FLAGS.input_images = parse_input()[0]
elif os.path.isfile(FLAGS.input_images):
FLAGS.input_images = os.path.abspath(FLAGS.input_images)
else:
exit('Error: path not found: {}'.format(FLAGS.input_images))
if FLAGS.output == "../data/test/":
FLAGS.output = os.path.splitext(FLAGS.input_images)[0]+'.mp4'
detect_video(yolo, video_path = FLAGS.input_images, output_path = FLAGS.output)
else:
print("Must specify either --image or --video. See usage with --help.")
| 35.188312 | 161 | 0.607677 |
4a22a0d06470206057dece9ac7c80ba902e61d1c | 7,257 | py | Python | data_bootstrap/bootstrap.py | Jenks18/mfl_api | ecbb8954053be06bbcac7e1132811d73534c78d9 | [
"MIT"
] | 19 | 2015-04-16T09:37:08.000Z | 2022-02-10T11:50:30.000Z | data_bootstrap/bootstrap.py | Jenks18/mfl_api | ecbb8954053be06bbcac7e1132811d73534c78d9 | [
"MIT"
] | 125 | 2015-03-26T14:05:49.000Z | 2020-05-14T08:16:50.000Z | data_bootstrap/bootstrap.py | Jenks18/mfl_api | ecbb8954053be06bbcac7e1132811d73534c78d9 | [
"MIT"
] | 39 | 2015-04-15T09:17:33.000Z | 2022-03-28T18:08:16.000Z | import json
import logging
import os
from collections import defaultdict
from django.apps import apps
from django.db import transaction
from django.db.utils import ProgrammingError
from django.contrib.gis.geos import Point
from django.core.exceptions import ObjectDoesNotExist
from common.fields import SequenceField
LOGGER = logging.getLogger(__name__)
def _retrieve_existing_model_instance(model_cls, field_data):
"""RETRIEVE an existing model instance ( mainly used to resolve FKs )"""
# to prevent any modifications making their way back to
# the original dict
assert isinstance(field_data, dict)
try:
instance = model_cls.objects.get(**field_data)
except (ProgrammingError, ObjectDoesNotExist):
keys = field_data.keys()
for key in keys:
value = field_data[key]
if isinstance(value, dict):
fk_model = model_cls._meta.get_field(key).rel.to
fk_instance = fk_model.objects.get(**value)
field_data[key] = fk_instance
else:
                # the field is not a foreign key, hence no need to update
# the dict with a model instance
pass
instance = model_cls.objects.get(**field_data)
return instance
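# Shape of `field_data` handled above (illustrative sketch; field names are
# placeholders): either a flat lookup such as {"code": 47}, or a nested one such as
# {"county": {"code": 47}} where the inner dict is swapped for the referenced model
# instance before the final .get().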
def _resolve_foreign_keys_and_coordinates(model_cls, record):
"""Retrieve and link instances of models referred to by FK
Also resolve any GIS ( geojson ) coordinates embedded in the data.
    This is the one step that imposes a dependency order on the data load.
The instance that is referred to must already exist.
"""
new_record = {}
for field in record.keys():
field_data = record[field]
model_field = model_cls._meta.get_field(field)
if model_field.get_internal_type() in [
"ForeignKey", "OneToOneField"]:
new_record[field] = _retrieve_existing_model_instance(
model_field.rel.to, field_data)
elif model_field.get_internal_type() == "PointField":
new_record[field] = Point(json.loads(field_data)["coordinates"])
else:
new_record[field] = field_data
return new_record
def _instantiate_single_record(model, unique_fields, record):
"""Create unsaved model instances, ready to be sent to bulk_create"""
assert isinstance(model, str) or isinstance(model, unicode)
assert isinstance(unique_fields, list)
assert isinstance(record, dict)
app, model_name = model.split('.', 1) # split only once
model_cls = apps.get_model(app_label=app, model_name=model_name)
if unique_fields:
unique_dict = {}
for field in unique_fields:
model_field = model_cls._meta.get_field(field)
field_data = record[field]
if model_field.get_internal_type() in [
"ForeignKey", "OneToOneField"]:
field_data = field_data.copy()
instance = _retrieve_existing_model_instance(
model_field.rel.to, field_data)
unique_dict[field] = instance
else:
unique_dict[field] = field_data
try:
instance = model_cls.objects.get(**unique_dict)
return model_cls, None # Must test for None in calling code
except model_cls.DoesNotExist:
try:
normalized_record = _resolve_foreign_keys_and_coordinates(model_cls, record) # NOQA
assert isinstance(normalized_record, dict)
instance = model_cls(**normalized_record)
# Do not allow SequenceField fields to go to the DB null
# bulk_create will not call our custom save()
for field in instance._meta.fields:
if (
isinstance(field, SequenceField)
and not getattr(instance, field.name)
and hasattr(instance, 'generate_next_code_sequence')
):
setattr(
instance,
field.name,
instance.generate_next_code_sequence()
)
return model_cls, instance
except Exception as e: # Don't panic, we will be re-raising
LOGGER.error(
'"{}" when instantiating a record of "{}" with unique '
'fields "{}" and data "{}"'
.format(e, model, unique_fields, record)
)
raise
except model_cls.MultipleObjectsReturned as ex:
LOGGER.error(
'Data bug ( non unique ): "{}". '
'It relates to record "{}" of model "{}", unique fields "{}".'
.format(ex, record, model, unique_fields)
)
return None, None # Calling code should be able to handle this
else:
LOGGER.error('Data file error; unique fields not specified')
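# Illustrative call (app, model and field names are hypothetical):
#   _instantiate_single_record("facilities.OwnerType", ["name"], {"name": "Private"})
# returns (model_cls, unsaved_instance) for a new record, or (model_cls, None) when a
# matching row already exists.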
def _process_model_spec(model_spec):
"""For each model spec, instantiate but do not save ( bulk save later )"""
model = model_spec['model']
unique_fields = model_spec['unique_fields']
records = model_spec['records']
assert isinstance(model, str) or isinstance(model, unicode)
assert isinstance(unique_fields, list)
assert isinstance(records, list)
assert len(records) > 0
# The first version of this function used some fancy functional techniques
# ( partials / currying ); we go back to a simpler, more deterministic way
unsaved_instances = defaultdict(list)
for record in records:
assert isinstance(record, dict)
try:
model_cls, unsaved_obj = _instantiate_single_record(
model, unique_fields, record
)
except Exception as ex: # Broad catch, reraised after debug logging
LOGGER.error('{} when working on {}'.format(ex, record))
raise
if unsaved_obj: # For existing instances, obj is set to `None`
unsaved_instances[model_cls].append(unsaved_obj)
for model_cls, instances in unsaved_instances.iteritems():
with transaction.atomic():
model_cls.objects.bulk_create(instances)
LOGGER.info(
'Created {} instances of {}'.format(len(instances), model_cls))
def process_json_file(filename):
"""The entry point - loops through data files and loads each in"""
assert isinstance(filename, str)
if os.path.isdir(filename):
LOGGER.info("Filename points to a directory")
return
else:
LOGGER.info('Processing {}'.format(filename))
with open(filename) as f:
model_specs = json.load(f)
assert isinstance(model_specs, list)
assert len(model_specs) > 0
for model_spec in model_specs:
try:
_process_model_spec(model_spec)
except Exception as ex: # Broad catch to allow debug messages
import traceback
traceback.print_exc()
LOGGER.error(
'{} when processing {:.1000}'.format(ex, model_spec))
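# Illustrative shape of a data file consumed by process_json_file (names are hypothetical);
# each entry is one model spec as read by _process_model_spec:
# [
#     {
#         "model": "facilities.OwnerType",
#         "unique_fields": ["name"],
#         "records": [{"name": "Private"}, {"name": "Public"}]
#     }
# ]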
| 38.194737 | 100 | 0.608103 |
4a22a0e301d1f61cfb2f50afcbb0d9be35f21b90 | 603 | py | Python | solutions/mixmilk.py | 24TangC/USACO-Bronze | 80f0986cb04998b039ba23c7349d25431b4e876b | [
"MIT"
] | null | null | null | solutions/mixmilk.py | 24TangC/USACO-Bronze | 80f0986cb04998b039ba23c7349d25431b4e876b | [
"MIT"
] | null | null | null | solutions/mixmilk.py | 24TangC/USACO-Bronze | 80f0986cb04998b039ba23c7349d25431b4e876b | [
"MIT"
] | null | null | null | capacity = []
current_amount = []
for x in range(0, 3):
inputnumber = input().split()
capacity.append(int(inputnumber[0]))
current_amount.append(int(inputnumber[1]))
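# Simulate 100 sequential pours: on step x, pail x % 3 is emptied into pail (x + 1) % 3,
# limited by the receiving pail's remaining capacity; any leftover stays in the source pail.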
for x in range(0, 100):
if (current_amount[x%3] + current_amount[(x+1) % 3]) <= capacity[(x+1)%3]:
current_amount[(x+1) % 3] += current_amount[x % 3]
current_amount[x % 3] = 0
else:
space = capacity[(x+1) % 3] - current_amount[(x+1) % 3]
current_amount[(x + 1) % 3] = capacity[(x+1) % 3]
current_amount[x % 3] -= space
for x in current_amount:
print(x)
| 31.736842 | 79 | 0.568823 |
4a22a2332147db69480ae4942a4187575a37438e | 2,997 | py | Python | fbpcs/private_computation/entity/private_computation_base_stage_flow.py | adshastri/fbpcs | 81d816ee56ea36f8f58dca6ae803fa50138cb91e | [
"MIT"
] | null | null | null | fbpcs/private_computation/entity/private_computation_base_stage_flow.py | adshastri/fbpcs | 81d816ee56ea36f8f58dca6ae803fa50138cb91e | [
"MIT"
] | null | null | null | fbpcs/private_computation/entity/private_computation_base_stage_flow.py | adshastri/fbpcs | 81d816ee56ea36f8f58dca6ae803fa50138cb91e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from abc import abstractmethod
from dataclasses import dataclass
from typing import Type, TypeVar, TYPE_CHECKING
from fbpcs.private_computation.entity.private_computation_status import (
PrivateComputationInstanceStatus,
)
if TYPE_CHECKING:
from fbpcs.private_computation.service.private_computation_stage_service import (
PrivateComputationStageService,
PrivateComputationStageServiceArgs,
)
from fbpcs.stage_flow.stage_flow import StageFlow, StageFlowData
C = TypeVar("C", bound="PrivateComputationBaseStageFlow")
@dataclass(frozen=True)
class PrivateComputationStageFlowData(StageFlowData[PrivateComputationInstanceStatus]):
is_joint_stage: bool
timeout: int = 3600
class PrivateComputationBaseStageFlow(StageFlow):
# TODO(T103297566): [BE] document PrivateComputationBaseStageFlow
def __init__(self, data: PrivateComputationStageFlowData) -> None:
super().__init__()
self.started_status: PrivateComputationInstanceStatus = data.started_status
self.failed_status: PrivateComputationInstanceStatus = data.failed_status
self.completed_status: PrivateComputationInstanceStatus = data.completed_status
self.is_joint_stage: bool = data.is_joint_stage
self.timeout: int = data.timeout
@classmethod
def cls_name_to_cls(cls: Type[C], name: str) -> Type[C]:
"""
Converts the name of an existing stage flow subclass into the subclass object
Arguments:
name: The name of a PrivateComputationBaseStageFlow subclass
Returns:
A subclass of PrivateComputationBaseStageFlow
Raises:
ValueError: raises when no subclass with the name 'name' is found
"""
for subclass in cls.__subclasses__():
if name == subclass.__name__:
return subclass
raise ValueError(f"No subclass with name {name}")
@classmethod
def get_cls_name(cls: Type[C]) -> str:
"""Convenience wrapper around cls.__name__"""
return cls.__name__
@abstractmethod
def get_stage_service(
self, args: "PrivateComputationStageServiceArgs"
) -> "PrivateComputationStageService":
"""
Maps StageFlow instances to StageService instances
Arguments:
args: Common arguments initialized in PrivateComputationService that are consumed by stage services
Returns:
An instantiated StageService object corresponding to the StageFlow enum member caller.
Raises:
NotImplementedError: The subclass doesn't implement a stage service for a given StageFlow enum member
"""
raise NotImplementedError(
f"get_stage_service not implemented for {self.__class__}"
)
| 34.848837 | 113 | 0.71705 |
4a22a4147759e8c6c7ca0385d53a99a48abfa204 | 105,113 | py | Python | dbReports/iondb/rundb/plan/page_plan/step_helper_db_loader.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | null | null | null | dbReports/iondb/rundb/plan/page_plan/step_helper_db_loader.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | null | null | null | dbReports/iondb/rundb/plan/page_plan/step_helper_db_loader.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | 1 | 2020-04-18T13:00:23.000Z | 2020-04-18T13:00:23.000Z | # Copyright (C) 2013 Ion Torrent Systems, Inc. All Rights Reserved
from django.utils.translation import ugettext_lazy
from django.db.models import Q
from iondb.utils import validation
from iondb.rundb.labels import Plan, PlanTemplate
from iondb.rundb.models import (
PlannedExperiment,
PlannedExperimentQC,
RunType,
dnaBarcode,
Plugin,
ApplProduct,
SampleSet,
ThreePrimeadapter,
Chip,
KitInfo,
AnalysisArgs,
ApplicationGroup,
)
from iondb.rundb.plan.page_plan.step_helper import StepHelper
from iondb.rundb.plan.page_plan.step_helper_types import StepHelperType
from iondb.rundb.plan.views_helper import getPlanDisplayedName, getPlanBarcodeCount
import json
from iondb.rundb.json_field import JSONEncoder
from iondb.rundb.plan.page_plan.step_names import StepNames
from iondb.rundb.plan.page_plan.kits_step_data import KitsFieldNames
from iondb.rundb.plan.page_plan.reference_step_data import ReferenceFieldNames
from iondb.rundb.plan.page_plan.plugins_step_data import PluginFieldNames
from iondb.rundb.plan.page_plan.output_step_data import OutputFieldNames
from iondb.rundb.plan.page_plan.barcode_by_sample_step_data import (
BarcodeBySampleFieldNames,
)
from iondb.rundb.plan.page_plan.save_plan_by_sample_step_data import (
SavePlanBySampleFieldNames,
)
from iondb.rundb.plan.page_plan.save_plan_step_data import ApplicationFieldNames
from iondb.rundb.plan.page_plan.save_template_step_data import (
SaveTemplateStepDataFieldNames,
)
from iondb.rundb.plan.page_plan.save_plan_step_data import SavePlanFieldNames
from iondb.rundb.plan.page_plan.ionreporter_step_data import IonReporterFieldNames
from iondb.rundb.plan.page_plan.analysis_params_step_data import (
AnalysisParamsFieldNames,
)
import logging
logger = logging.getLogger(__name__)
class StepHelperDbLoader:
def getStepHelperForRunType(
self,
run_type_id,
step_helper_type=StepHelperType.CREATE_NEW_TEMPLATE,
applicationGroupName=None,
):
"""
        Creates a step helper for the specified run type; this can be a plan or a template step helper.
"""
# logger.debug("ENTER step_helper_db_loader.getStepHelperForRunType() run_type_id=%s; step_helper_type=%s" %(str(run_type_id), step_helper_type))
step_helper = StepHelper(sh_type=step_helper_type)
ionReporter_step_data = step_helper.steps[StepNames.IONREPORTER]
application_step_data = step_helper.steps[StepNames.APPLICATION]
kits_step_data = step_helper.steps[StepNames.KITS]
runType = RunType.objects.get(pk=run_type_id)
applicationGroupObj = (
ApplicationGroup.objects.get(name=applicationGroupName)
if applicationGroupName
else None
)
if applicationGroupObj:
step_helper.parentName = applicationGroupObj.description
else:
step_helper.parentName = runType.description
step_helper.isFromScratch = True
step_helper.isParentSystem = False
self._updateApplicationStepData(
runType, step_helper, application_step_data, applicationGroupObj
)
self._updateKitsStepData(
runType, step_helper, kits_step_data, applicationGroupObj
)
if step_helper.isPlan():
save_plan_step_data = step_helper.steps[StepNames.SAVE_PLAN]
self._updateSaveStepData(runType, step_helper, save_plan_step_data)
return step_helper
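    # Illustrative use (the pk and application group name are hypothetical):
    #   helper = StepHelperDbLoader().getStepHelperForRunType(
    #       run_type_id=1,
    #       step_helper_type=StepHelperType.CREATE_NEW_PLAN,
    #       applicationGroupName="DNA")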
def _updateApplicationStepData(
self, runTypeObj, step_helper, application_step_data, applicationGroupObj=None
):
application_step_data.savedFields[
ApplicationFieldNames.RUN_TYPE
] = runTypeObj.pk
if applicationGroupObj:
application_step_data.savedFields[
ApplicationFieldNames.APPLICATION_GROUP_NAME
] = applicationGroupObj.name
application_step_data.savedObjects[
ApplicationFieldNames.APPL_PRODUCT
] = ApplProduct.get_default_for_runType(
runTypeObj.runType, applicationGroupName=applicationGroupObj.name
)
application_step_data.prepopulatedFields[
ApplicationFieldNames.APPL_PRODUCTS
] = ApplProduct.objects.filter(
isActive=True,
isDefaultForInstrumentType=True,
applType__runType=runTypeObj.runType,
applicationGroup=applicationGroupObj,
)
categorizedApplProducts = ApplProduct.objects.filter(
isActive=True,
applType__runType=runTypeObj.runType,
applicationGroup=applicationGroupObj,
).exclude(categories="")
if categorizedApplProducts:
application_step_data.prepopulatedFields[
ApplicationFieldNames.APPL_PRODUCTS_CATEGORIZED
] = categorizedApplProducts
else:
application_step_data.prepopulatedFields[
ApplicationFieldNames.APPL_PRODUCTS_CATEGORIZED
] = None
else:
application_step_data.savedFields[
ApplicationFieldNames.APPLICATION_GROUP_NAME
] = runTypeObj.applicationGroups.all()[0:1][0].name
# application_step_data.savedObjects["runType"] = runTypeObj
# application_step_data.savedObjects["applProduct"] = ApplProduct.objects.get(isActive=True, isDefault=True, isVisible=True,
# applType__runType = runTypeObj.runType)
application_step_data.updateSavedObjectsFromSavedFields()
step_helper.update_dependent_steps(application_step_data)
def _updateKitsStepData(
self, runTypeObj, step_helper, kits_step_data, applicationGroupObj=None
):
kits_step_data.prepopulatedFields[
KitsFieldNames.IS_CHIP_TYPE_REQUIRED
] = step_helper.isPlan()
if applicationGroupObj:
applProduct = ApplProduct.objects.get(
isActive=True,
isVisible=True,
applType__runType=runTypeObj.runType,
applicationGroup=applicationGroupObj,
)
if applProduct:
kits_step_data.updateFieldsFromDefaults(applProduct)
kits_step_data.prepopulatedFields[
KitsFieldNames.ADVANCED_SETTINGS
] = json.dumps(self.get_kit_advanced_settings(step_helper))
def _updateSaveStepData(self, runTypeObj, step_helper, save_plan_step_data):
num_samples = 1
if step_helper.isDualNucleotideTypeBySample():
num_samples = 2
if runTypeObj.runType == "TAG_SEQUENCING":
num_samples = 8
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.NUM_SAMPLES
] = num_samples
def _metaDataFromPlan(self, step_helper, planned_experiment):
metaData = planned_experiment.metaData or {}
if (
step_helper.isCreate()
or step_helper.sh_type == StepHelperType.COPY_TEMPLATE
):
metaData["fromTemplate"] = planned_experiment.planName
metaData["fromTemplateSource"] = (
"ION" if planned_experiment.isSystem else planned_experiment.username
)
return metaData
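    # e.g. copying a system template yields
    #   {"fromTemplate": <template name>, "fromTemplateSource": "ION"};
    # for user-created templates the source is the owning username.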
def getStepHelperForTemplateRunType(
self,
run_type_id,
step_helper_type=StepHelperType.CREATE_NEW_TEMPLATE,
template_id=-1,
):
"""
        Creates a template step helper for the specified run type.
"""
# logger.debug("ENTER step_helper_db_loader.getStepHelperForRunType() run_type_id=%s" %(str(run_type_id)))
step_helper = StepHelper(
sh_type=step_helper_type, previous_template_id=template_id
)
ionReporter_step_data = step_helper.steps[StepNames.IONREPORTER]
application_step_data = step_helper.steps[StepNames.APPLICATION]
runType = RunType.objects.get(pk=run_type_id)
step_helper.parentName = runType.description
step_helper.isParentSystem = False
applicationGroupObj = None
if template_id > 0:
planned_experiment = PlannedExperiment.objects.get(pk=template_id)
if planned_experiment.applicationGroup:
applicationGroupObj = planned_experiment.applicationGroup
# if plan/template has applicationGroup, need to pass that along
self._updateApplicationStepData(
runType, step_helper, application_step_data, applicationGroupObj
)
kits_step_data = step_helper.steps[StepNames.KITS]
self._updateKitsStepData(runType, step_helper, kits_step_data)
return step_helper
def getStepHelperForNewTemplateBySample(
self,
run_type_id,
sampleset_id,
step_helper_type=StepHelperType.CREATE_NEW_TEMPLATE_BY_SAMPLE,
):
"""
Start Plan by Sample creation with "Add new Template"
"""
step_helper = StepHelper(sh_type=step_helper_type)
ionReporter_step_data = step_helper.steps[StepNames.IONREPORTER]
application_step_data = step_helper.steps[StepNames.APPLICATION]
kits_step_data = step_helper.steps[StepNames.KITS]
sampleset = SampleSet.objects.filter(pk__in=sampleset_id.split(","))[0]
runType = RunType.objects.get(pk=run_type_id)
step_helper.parentName = runType.description
step_helper.isParentSystem = False
ionReporter_step_data.savedFields["sampleset_id"] = sampleset_id
self._updateApplicationStepData(runType, step_helper, application_step_data)
barcodeSet = None
for item in sampleset.samples.all().order_by("sample__displayedName"):
if item.dnabarcode:
barcodeSet = item.dnabarcode.name
break
kits_step_data.savedFields[KitsFieldNames.BARCODE_ID] = barcodeSet
# 20170928-TODO-WIP
if sampleset.libraryPrepInstrument == "chef":
kits_step_data.savedFields[
KitsFieldNames.TEMPLATE_KIT_TYPE
] = KitsFieldNames.ION_CHEF
if sampleset.libraryPrepKitName and sampleset.libraryPrepKitName != "0":
kits_step_data.savedFields[
KitsFieldNames.LIBRARY_KIT_NAME
] = sampleset.libraryPrepKitName
return step_helper
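    # Note: sampleset_id may be a comma-separated list of SampleSet pks; only the first
    # SampleSet found seeds the barcode kit and Chef/library-prep defaults above.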
def updateTemplateSpecificStepHelper(self, step_helper, planned_experiment):
"""
Updates the template specific step helper with template specific info from the planned experiment.
"""
# logger.debug("ENTER step_helper_db_loader.updateTemplateSpecificStepHelper()")
if step_helper.isTemplateBySample():
save_template_step_data = step_helper.steps[
StepNames.SAVE_TEMPLATE_BY_SAMPLE
]
else:
save_template_step_data = step_helper.steps[StepNames.SAVE_TEMPLATE]
planDisplayedName = getPlanDisplayedName(planned_experiment)
if step_helper.sh_type == StepHelperType.COPY_TEMPLATE:
save_template_step_data.savedFields[
SaveTemplateStepDataFieldNames.TEMPLATE_NAME
] = ("Copy of " + planDisplayedName)
else:
save_template_step_data.savedFields[
SaveTemplateStepDataFieldNames.TEMPLATE_NAME
] = planDisplayedName
save_template_step_data.savedFields[
SaveTemplateStepDataFieldNames.SET_AS_FAVORITE
] = planned_experiment.isFavorite
save_template_step_data.savedFields[
SaveTemplateStepDataFieldNames.NOTE
] = planned_experiment.get_notes()
LIMS_meta = planned_experiment.get_LIMS_meta()
# logger.debug("step_helper_db_loader.updateTemplateSpecificStepHelper() type(LIMS_meta)=%s; LIMS_meta=%s" %(type(LIMS_meta), LIMS_meta))
if type(LIMS_meta) is list:
# convert list to string
save_template_step_data.savedFields[
SaveTemplateStepDataFieldNames.LIMS_META
] = "".join(LIMS_meta)
else:
save_template_step_data.savedFields[
SaveTemplateStepDataFieldNames.LIMS_META
] = LIMS_meta
# logger.debug("step_helper_db_loader.updateTemplateSpecificStepHelper() LIMS_META=%s" %(save_template_step_data.savedFields[SaveTemplateStepDataFieldNames.LIMS_META]))
save_template_step_data.savedObjects[
SaveTemplateStepDataFieldNames.META
] = self._metaDataFromPlan(step_helper, planned_experiment)
def updatePlanSpecificStepHelper(
self, step_helper, planned_experiment, set_template_name=False
):
"""
Updates the plan specific step helper with plan specific info from the planned experiment.
If the planned experiment is a template and you'd like the originating template name to show up
        in the save plan page, pass in set_template_name=True.
"""
# logger.debug("ENTER step_helper_db_loader.updatePlanSpecificStepHelper()")
planDisplayedName = getPlanDisplayedName(planned_experiment)
if set_template_name:
if step_helper.isTemplateBySample():
step_helper.steps[StepNames.SAVE_TEMPLATE_BY_SAMPLE].savedFields[
SaveTemplateStepDataFieldNames.TEMPLATE_NAME
] = planDisplayedName
else:
step_helper.steps[StepNames.SAVE_TEMPLATE].savedFields[
SavePlanBySampleFieldNames.TEMPLATE_NAME
] = planDisplayedName
save_plan_step_data = step_helper.steps[StepNames.SAVE_PLAN]
# Add a "copy of" if we're copying.
if step_helper.isCopy():
save_plan_step_data.savedFields[SavePlanFieldNames.PLAN_NAME] = (
"Copy of " + planDisplayedName
)
else:
save_plan_step_data.savedFields[
SavePlanFieldNames.PLAN_NAME
] = planDisplayedName
save_plan_step_data.savedFields[
SavePlanFieldNames.NOTE
] = planned_experiment.get_notes()
LIMS_meta = planned_experiment.get_LIMS_meta()
# logger.debug("step_helper_db_loader.updatePlanSpecificStepHelper() type(LIMS_meta)=%s; LIMS_meta=%s" %(type(LIMS_meta), LIMS_meta))
if type(LIMS_meta) is list:
# convert list to string
save_plan_step_data.savedFields[SavePlanFieldNames.LIMS_META] = "".join(
LIMS_meta
)
else:
save_plan_step_data.savedFields[SavePlanFieldNames.LIMS_META] = LIMS_meta
# logger.debug("step_helper_db_loader.updatePlanSpecificStepHelper() LIMS_META=%s" %(save_plan_step_data.savedFields[SavePlanFieldNames.LIMS_META]))
save_plan_step_data.savedObjects[
SavePlanFieldNames.META
] = self._metaDataFromPlan(step_helper, planned_experiment)
barcodeSet = planned_experiment.get_barcodeId()
endBarcodeSet = planned_experiment.get_endBarcodeKitName()
self._update_barcode_sets_for_edit(
step_helper, barcodeSet, endBarcodeSet, save_plan_step_data
)
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.PLAN_REFERENCE
] = planned_experiment.get_library()
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.PLAN_TARGET_REGION_BED_FILE
] = planned_experiment.get_bedfile()
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.PLAN_HOTSPOT_REGION_BED_FILE
] = planned_experiment.get_regionfile()
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.RUN_TYPE
] = planned_experiment.runType
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.IR_WORKFLOW
] = planned_experiment.irworkflow
isOncoSameSample = False
if (
RunType.is_dna_rna(planned_experiment.runType)
and planned_experiment.runType != "MIXED"
):
sample_count = planned_experiment.get_sample_count()
barcode_count = getPlanBarcodeCount(planned_experiment)
isOncoSameSample = sample_count * 2 == barcode_count
save_plan_step_data.savedFields[
SavePlanFieldNames.ONCO_SAME_SAMPLE
] = isOncoSameSample
# logger.debug("step_helper_db_loader.updatePlanSpecificStepHelper isOncoSameSample=%s" %(isOncoSameSample))
# add IonReporter parameters
irInfo = self._getIRinfo(planned_experiment)
if irInfo:
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.SELECTED_IR
] = irInfo[SavePlanFieldNames.SELECTED_IR]
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.IR_CONFIG_JSON
] = irInfo[SavePlanFieldNames.IR_CONFIG_JSON]
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.SETID_SUFFIX
] = irInfo.get(SavePlanFieldNames.SETID_SUFFIX)
logger.debug(
"step_helper_db_loader.updatePlanSpecificStepHelper() irInfo=%s"
% (irInfo)
)
userInputInfo = irInfo.get("userInputInfo", [])
iru_hasOncoData = False
iru_hasPgsData = False
if userInputInfo:
for info in userInputInfo:
if info.get("cancerType", "") or info.get("cellularityPct", ""):
iru_hasOncoData = True
if (
info.get("biopsyDays", "")
or info.get("coupleID", "")
or info.get("embryoID", "")
):
iru_hasPgsData = True
if planned_experiment.categories:
if (
"Oncomine" in planned_experiment.categories
or "Onconet" in planned_experiment.categories
):
iru_hasOncoData = True
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.HAS_ONCO_DATA
] = iru_hasOncoData
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.HAS_PGS_DATA
] = iru_hasPgsData
samplesTable = self._getSamplesTable_from_plan(
planned_experiment, step_helper, irInfo
)
if samplesTable:
# if a plan is created from IR-enabled template, userInputInfo doesn't exist yet so need to add irWorkflow
if planned_experiment.irworkflow and not (
irInfo and irInfo["userInputInfo"]
):
for sampleDict in samplesTable:
sampleDict["irWorkflow"] = planned_experiment.irworkflow
save_plan_step_data.savedFields[
SavePlanFieldNames.SAMPLES_TABLE
] = json.dumps(samplesTable)
num_samples = len(samplesTable)
if step_helper.isCreate():
# initial number of samples for new plans, if greater than 1
categories = planned_experiment.categories or ""
if step_helper.isDualNucleotideTypeBySample():
num_samples = 2
if step_helper.isBarcoded():
if "ocav2" in categories:
num_samples = 24
elif "barcodes_" in categories:
num_samples = int(
[
s.split("_")
for s in categories.split(";")
if "barcodes_" in s
][0][1]
)
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.NUM_SAMPLES
] = num_samples
if step_helper.isBarcoded():
# do not copy sampleTubeLabel since a sample tube is meant for 1 run only
save_plan_step_data.savedFields[
SavePlanFieldNames.BARCODE_SAMPLE_TUBE_LABEL
] = ("" if step_helper.isCopy() else planned_experiment.sampleTubeLabel)
save_plan_step_data.savedFields[SavePlanFieldNames.CHIP_BARCODE_LABEL] = (
"" if step_helper.isCopy() else planned_experiment.get_chipBarcode()
)
def updateUniversalStepHelper(self, step_helper, planned_experiment):
"""
Update a step helper with info from planned experiment that applies to both plans and templates.
"""
# logger.debug("ENTER step_helper_db_loader.updateUniversalStepHelper()")
# export_step_data = step_helper.steps[StepNames.EXPORT]
ionreporter_step_data = step_helper.steps[StepNames.IONREPORTER]
application_step_data = step_helper.steps[StepNames.APPLICATION]
kits_step_data = step_helper.steps[StepNames.KITS]
reference_step_data = step_helper.steps[StepNames.REFERENCE]
plugins_step_data = step_helper.steps[StepNames.PLUGINS]
analysisParams_step_data = step_helper.steps[StepNames.ANALYSIS_PARAMS]
# if not step_helper.isPlanBySample():
# ionreporter_step_data = step_helper.steps[StepNames.IONREPORTER]
appl_product = None
# application_step_data.updateFromStep(export_step_data)
self._updateUniversalStep_ionReporterData(
step_helper, planned_experiment, ionreporter_step_data
)
appl_product = self._updateUniversalStep_applicationData(
step_helper, planned_experiment, application_step_data
)
logger.debug(
"step_helper_db_loader.updateUniversalStepHelper() planned_experiment.id=%d; applProduct.productCode=%s"
% (planned_experiment.id, appl_product.productCode)
)
self._updateUniversalStep_kitData(
step_helper,
planned_experiment,
appl_product,
application_step_data,
kits_step_data,
)
self._updateUniversalStep_referenceData(
step_helper, planned_experiment, appl_product, reference_step_data
)
self._updateUniversalStep_analysisParamsData(
step_helper,
planned_experiment,
appl_product,
application_step_data,
kits_step_data,
analysisParams_step_data,
)
if step_helper.isEdit() or step_helper.isEditRun() or step_helper.isCopy():
self._updateUniversalStep_applicationData_for_edit(
step_helper, planned_experiment, application_step_data
)
# During plan editing, kits_step_data.updateFromStep() is executed before step_helper_db_loader.updateUniversalStepHelper().
# This results in savedObjects[ApplicationFieldNames.APPL_PRODUCT] not getting set.
# WORKAROUND: The following is a workaround to ensure prepopulatedFields are set for the Kits chevron
self._updateUniversalStep_kitData_for_edit(
step_helper,
planned_experiment,
appl_product,
application_step_data,
kits_step_data,
)
self._updateUniversalStep_referenceData_for_edit(
step_helper,
planned_experiment,
appl_product,
application_step_data,
reference_step_data,
)
self._updateUniversalStep_pluginData_ionreporterData(
step_helper,
planned_experiment,
appl_product,
plugins_step_data,
ionreporter_step_data,
)
logger.debug(
"PLUGINS ARE: %s" % str(plugins_step_data.savedFields[StepNames.PLUGINS])
)
qc_values = planned_experiment.qcValues.all()
if step_helper.isTemplate():
if step_helper.isTemplateBySample():
target_step = StepNames.SAVE_TEMPLATE_BY_SAMPLE
else:
target_step = StepNames.SAVE_TEMPLATE
elif step_helper.isPlanBySample():
target_step = StepNames.SAVE_PLAN_BY_SAMPLE
else:
target_step = StepNames.SAVE_PLAN
for qc_value in qc_values:
step_helper.steps[target_step].savedFields[
qc_value.qcName
] = PlannedExperimentQC.objects.get(
plannedExperiment__pk=planned_experiment.pk, qcType__pk=qc_value.pk
).threshold
step_helper.steps[StepNames.OUTPUT].savedFields[OutputFieldNames.PROJECTS] = []
projects = planned_experiment.projects.all()
for project in projects:
step_helper.steps[StepNames.OUTPUT].savedFields[
OutputFieldNames.PROJECTS
].append(project.pk)
def _updateUniversalStep_ionReporterData(
self, step_helper, planned_experiment, ionreporter_step_data
):
ionreporter_step_data.savedFields[IonReporterFieldNames.SAMPLE_GROUPING] = (
planned_experiment.sampleGrouping.pk
if planned_experiment.sampleGrouping
else None
)
ionreporter_step_data.savedObjects[IonReporterFieldNames.SAMPLE_GROUPING] = (
planned_experiment.sampleGrouping
if planned_experiment.sampleGrouping
else None
)
ionreporter_step_data.prepopulatedFields[
IonReporterFieldNames.CATEGORIES
] = planned_experiment.categories
def _updateUniversalStep_applicationData(
self, step_helper, planned_experiment, application_step_data
):
if RunType.objects.filter(runType=planned_experiment.runType).count() > 0:
selectedRunType = RunType.objects.get(runType=planned_experiment.runType)
else:
selectedRunType = RunType.objects.get(runType="GENS")
application_step_data.savedFields[
ApplicationFieldNames.RUN_TYPE
] = selectedRunType.pk
application_step_data.savedObjects[
ApplicationFieldNames.RUN_TYPE
] = selectedRunType
if (
planned_experiment.applicationGroup
in selectedRunType.applicationGroups.all()
):
selectedApplicationGroup = planned_experiment.applicationGroup
else:
# if no application group is selected, pick the first one associated with runType
selectedApplicationGroup = selectedRunType.applicationGroups.first()
if selectedApplicationGroup:
application_step_data.savedFields[
ApplicationFieldNames.APPLICATION_GROUP_NAME
] = selectedApplicationGroup.name
instrumentType = planned_experiment.experiment.getPlatform
application_step_data.prepopulatedFields[
ApplicationFieldNames.INSTRUMENT_TYPE
] = instrumentType
appl_product = ApplProduct.get_default_for_runType(
selectedRunType.runType,
applicationGroupName=selectedApplicationGroup.name,
instrumentType=instrumentType,
)
application_step_data.savedObjects[
ApplicationFieldNames.APPL_PRODUCT
] = appl_product
application_step_data.prepopulatedFields[
ApplicationFieldNames.CATEGORIES
] = planned_experiment.categories
# logger.debug(" step_helper_db_loader_updateUniversalStep_applicationData() helper.sh_type=%s application_step_data.categories=%s" %(step_helper.sh_type, application_step_data.prepopulatedFields[ApplicationFieldNames.CATEGORIES]))
step = step_helper.steps.get(StepNames.REFERENCE, "")
if step:
self._updateStep_with_applicationData(step, application_step_data)
step.prepopulatedFields[
ReferenceFieldNames.RUN_TYPE
] = application_step_data.savedObjects[
ApplicationFieldNames.RUN_TYPE
].runType
step = step_helper.steps.get(StepNames.ANALYSIS_PARAMS, "")
if step:
step.prepopulatedFields[
AnalysisParamsFieldNames.RUN_TYPE
] = application_step_data.savedObjects[
ApplicationFieldNames.RUN_TYPE
].runType
step.prepopulatedFields[
AnalysisParamsFieldNames.APPLICATION_GROUP_NAME
] = application_step_data.savedFields[
ApplicationFieldNames.APPLICATION_GROUP_NAME
]
step = step_helper.steps.get(StepNames.BARCODE_BY_SAMPLE, "")
if step:
self._updateStep_with_applicationData(step, application_step_data)
step = step_helper.steps.get(StepNames.SAVE_PLAN, "")
if step:
self._updateStep_with_applicationData(step, application_step_data)
step = step_helper.steps.get(StepNames.SAVE_PLAN_BY_SAMPLE, "")
if step:
self._updateStep_with_applicationData(step, application_step_data)
return appl_product
def _updateStep_with_applicationData(self, step, application_step_data):
if step and application_step_data:
step.prepopulatedFields[
SavePlanFieldNames.APPLICATION_GROUP_NAME
] = application_step_data.savedFields[
ApplicationFieldNames.APPLICATION_GROUP_NAME
]
def _updateUniversalStep_applicationData_for_edit(
self, step_helper, planned_experiment, application_step_data
):
application_step_data.prepopulatedFields[
ApplicationFieldNames.PLAN_STATUS
] = planned_experiment.planStatus
applicationGroupObj = (
planned_experiment.applicationGroup
if planned_experiment.applicationGroup
else None
)
categorizedApplProducts = None
if applicationGroupObj:
categorizedApplProducts = ApplProduct.objects.filter(
isActive=True,
applType__runType=planned_experiment.runType,
applicationGroup=applicationGroupObj,
).exclude(categories="")
else:
categorizedApplProducts = ApplProduct.objects.filter(
isActive=True, applType__runType=planned_experiment.runType
).exclude(categories="")
if categorizedApplProducts:
application_step_data.prepopulatedFields[
ApplicationFieldNames.APPL_PRODUCTS_CATEGORIZED
] = categorizedApplProducts
else:
application_step_data.prepopulatedFields[
ApplicationFieldNames.APPL_PRODUCTS_CATEGORIZED
] = None
def _updateUniversalStep_kitData(
self,
step_helper,
planned_experiment,
appl_product,
application_step_data,
kits_step_data,
):
application_step_data.savedObjects[
ApplicationFieldNames.APPL_PRODUCT
] = appl_product
kits_step_data.savedFields[KitsFieldNames.TEMPLATE_KIT_TYPE] = "OneTouch"
if planned_experiment.is_ionChef():
kits_step_data.savedFields[KitsFieldNames.TEMPLATE_KIT_TYPE] = "IonChef"
elif planned_experiment.is_isoAmp():
kits_step_data.savedFields[KitsFieldNames.TEMPLATE_KIT_TYPE] = "IA"
kits_step_data.savedFields[
KitsFieldNames.TEMPLATE_KIT_NAME
] = planned_experiment.templatingKitName
kits_step_data.savedFields[
KitsFieldNames.CONTROL_SEQUENCE
] = planned_experiment.controlSequencekitname
kits_step_data.savedFields[
KitsFieldNames.SAMPLE_PREPARATION_KIT
] = planned_experiment.samplePrepKitName
kits_step_data.savedFields[
KitsFieldNames.BARCODE_ID
] = planned_experiment.get_barcodeId()
chipType = planned_experiment.get_chipType()
kits_step_data.savedFields[KitsFieldNames.CHIP_TYPE] = (
"318" if chipType == "318v2" else chipType
)
kits_step_data.prepopulatedFields[
KitsFieldNames.IS_CHIP_TYPE_REQUIRED
] = step_helper.isPlan()
kits_step_data.savedFields[
KitsFieldNames.FLOWS
] = planned_experiment.get_flows()
kits_step_data.savedFields[
KitsFieldNames.LIBRARY_READ_LENGTH
] = planned_experiment.libraryReadLength
kits_step_data.savedFields[
KitsFieldNames.READ_LENGTH
] = planned_experiment.libraryReadLength
kits_step_data.savedFields[
KitsFieldNames.FORWARD_3_PRIME_ADAPTER
] = planned_experiment.get_forward3primeadapter()
kits_step_data.savedFields[
KitsFieldNames.FLOW_ORDER
] = planned_experiment.experiment.flowsInOrder
kits_step_data.savedFields[
KitsFieldNames.LIBRARY_KEY
] = planned_experiment.get_libraryKey()
tfKey = planned_experiment.get_tfKey()
if tfKey:
kits_step_data.savedFields[KitsFieldNames.TF_KEY] = tfKey
kits_step_data.savedFields[
KitsFieldNames.LIBRARY_KIT_NAME
] = planned_experiment.get_librarykitname()
kits_step_data.savedFields[
KitsFieldNames.SEQUENCE_KIT_NAME
] = planned_experiment.get_sequencekitname()
kits_step_data.savedFields[
KitsFieldNames.IS_DUPLICATED_READS
] = planned_experiment.is_duplicateReads()
kits_step_data.savedFields[
KitsFieldNames.BASE_RECALIBRATE
] = planned_experiment.get_base_recalibration_mode()
kits_step_data.savedFields[
KitsFieldNames.REALIGN
] = planned_experiment.do_realign()
kits_step_data.savedFields[
KitsFieldNames.SAMPLE_PREP_PROTOCOL
] = planned_experiment.samplePrepProtocol
kits_step_data.prepopulatedFields[KitsFieldNames.PLAN_CATEGORIES] = (
planned_experiment.categories or ""
)
kits_step_data.prepopulatedFields[
KitsFieldNames.IS_BARCODE_KIT_SELECTION_REQUIRED
] = appl_product.isBarcodeKitSelectionRequired
kits_step_data.savedFields[KitsFieldNames.ADVANCED_SETTINGS_CHOICE] = (
"custom" if planned_experiment.isCustom_kitSettings else "default"
)
kits_step_data.prepopulatedFields[
KitsFieldNames.ADVANCED_SETTINGS
] = json.dumps(self.get_kit_advanced_settings(step_helper, planned_experiment))
def _updateUniversalStep_kitData_for_edit(
self,
step_helper,
planned_experiment,
appl_product,
application_step_data,
kits_step_data,
):
application_step_data.savedObjects[
ApplicationFieldNames.APPL_PRODUCT
] = appl_product
# no chip type selection for sequenced run
if step_helper.isEditRun():
kits_step_data.prepopulatedFields[
KitsFieldNames.CHIP_TYPES
] = Chip.objects.filter(
name=kits_step_data.savedFields[KitsFieldNames.CHIP_TYPE]
)
available_dnaBarcodes = dnaBarcode.objects.filter(Q(active=True))
        # if editing a sequenced run, old/obsolete chipType and kits must be included
if step_helper.isEditRun() or step_helper.isEdit():
kits_step_data.prepopulatedFields[
KitsFieldNames.CONTROL_SEQ_KITS
] |= KitInfo.objects.filter(
name=kits_step_data.savedFields[KitsFieldNames.CONTROL_SEQUENCE]
)
kits_step_data.prepopulatedFields[
KitsFieldNames.SAMPLE_PREP_KITS
] |= KitInfo.objects.filter(
name=kits_step_data.savedFields[KitsFieldNames.SAMPLE_PREPARATION_KIT]
)
kits_step_data.prepopulatedFields[
KitsFieldNames.LIB_KITS
] |= KitInfo.objects.filter(
name=kits_step_data.savedFields[KitsFieldNames.LIBRARY_KIT_NAME]
)
kits_step_data.prepopulatedFields[
KitsFieldNames.SEQ_KITS
] |= KitInfo.objects.filter(
name=kits_step_data.savedFields[KitsFieldNames.SEQUENCE_KIT_NAME]
)
savedtemplatekit = KitInfo.objects.filter(
name=kits_step_data.savedFields[KitsFieldNames.TEMPLATE_KIT_NAME]
)
kits_step_data.prepopulatedFields[
KitsFieldNames.TEMPLATE_KITS
] |= savedtemplatekit
oneTouchKits = kits_step_data.prepopulatedFields[
KitsFieldNames.TEMPLATE_KIT_TYPES
][KitsFieldNames.ONE_TOUCH][KitsFieldNames.KIT_VALUES]
ionChefKits = kits_step_data.prepopulatedFields[
KitsFieldNames.TEMPLATE_KIT_TYPES
][KitsFieldNames.ION_CHEF][KitsFieldNames.KIT_VALUES]
isoAmpKits = kits_step_data.prepopulatedFields[
KitsFieldNames.TEMPLATE_KIT_TYPES
][KitsFieldNames.ISO_AMP][KitsFieldNames.KIT_VALUES]
kits_step_data.prepopulatedFields[KitsFieldNames.TEMPLATE_KIT_TYPES][
KitsFieldNames.ONE_TOUCH
][KitsFieldNames.KIT_VALUES] |= savedtemplatekit.filter(
kitType__in=oneTouchKits.values_list("kitType", flat=True)
)
kits_step_data.prepopulatedFields[KitsFieldNames.TEMPLATE_KIT_TYPES][
KitsFieldNames.ION_CHEF
][KitsFieldNames.KIT_VALUES] |= savedtemplatekit.filter(
kitType__in=ionChefKits.values_list("kitType", flat=True)
)
kits_step_data.prepopulatedFields[KitsFieldNames.TEMPLATE_KIT_TYPES][
KitsFieldNames.ISO_AMP
][KitsFieldNames.KIT_VALUES] |= savedtemplatekit.filter(
kitType__in=isoAmpKits.values_list("kitType", flat=True)
)
available_dnaBarcodes = dnaBarcode.objects.filter(
Q(active=True) | Q(name=planned_experiment.get_barcodeId())
)
# if step_helper.isEdit():
logger.debug(
"step_helper_db_loader._updateUniversalStep_kitData_for_edit() - isEdit - appl_product.barcodeKitSelectableType=%s"
% (appl_product.barcodeKitSelectableType)
)
if appl_product.applType.runType in ["AMPS", "AMPS_EXOME"]:
kits_step_data.prepopulatedFields[
KitsFieldNames.CONTROL_SEQ_KITS
] = KitInfo.objects.filter(
kitType="ControlSequenceKit",
applicationType__in=["", "DNA", "AMPS_ANY"],
isActive=True,
).order_by(
"name"
)
elif appl_product.applType.runType in ["AMPS_RNA"]:
kits_step_data.prepopulatedFields[
KitsFieldNames.CONTROL_SEQ_KITS
] = KitInfo.objects.filter(
kitType="ControlSequenceKit",
applicationType__in=["", "RNA", "AMPS_ANY"],
isActive=True,
).order_by(
"name"
)
elif appl_product.applType.runType in ["RNA"]:
kits_step_data.prepopulatedFields[
KitsFieldNames.CONTROL_SEQ_KITS
] = KitInfo.objects.filter(
kitType="ControlSequenceKit", applicationType="RNA", isActive=True
).order_by(
"name"
)
elif appl_product.applType.runType in ["AMPS_DNA_RNA"]:
kits_step_data.prepopulatedFields[
KitsFieldNames.CONTROL_SEQ_KITS
] = KitInfo.objects.filter(
kitType="ControlSequenceKit",
applicationType__in=["", "DNA", "RNA", "AMPS_ANY"],
isActive=True,
).order_by(
"name"
)
else:
kits_step_data.prepopulatedFields[
KitsFieldNames.CONTROL_SEQ_KITS
] = KitInfo.objects.filter(
kitType="ControlSequenceKit",
applicationType__in=["", "DNA"],
isActive=True,
).order_by(
"name"
)
kits_step_data.prepopulatedFields[KitsFieldNames.BARCODES] = list(
available_dnaBarcodes.values("name").distinct().order_by("name")
)
kits_step_data.prepopulatedFields[KitsFieldNames.BARCODES_SUBSET] = list(
available_dnaBarcodes.filter(
type__in=appl_product.barcodeKitSelectableTypes_list
)
.values("name")
.distinct()
.order_by("name")
)
def _updateUniversalStep_referenceData(
self, step_helper, planned_experiment, appl_product, reference_step_data
):
# logger.debug("ENTER step_helper_db_loader._updateUniversalStep_referenceData()...")
reference_step_data.savedFields[
ReferenceFieldNames.TARGET_BED_FILE
] = planned_experiment.get_bedfile()
reference_step_data.savedFields[
ReferenceFieldNames.REFERENCE
] = planned_experiment.get_library()
reference_step_data.savedFields[
ReferenceFieldNames.HOT_SPOT_BED_FILE
] = planned_experiment.get_regionfile()
sseBedFile = planned_experiment.get_sseBedFile()
targetRegionBEDFile = reference_step_data.savedFields[
ReferenceFieldNames.TARGET_BED_FILE
]
if sseBedFile and targetRegionBEDFile:
reference_step_data.prepopulatedFields[
ReferenceFieldNames.SSE_BED_FILE_DICT
][targetRegionBEDFile.split("/")[-1]] = sseBedFile
mixedTypeRNA_targetRegion = planned_experiment.get_mixedType_rna_bedfile()
reference_step_data.savedFields[
ReferenceFieldNames.MIXED_TYPE_RNA_TARGET_BED_FILE
] = ("" if mixedTypeRNA_targetRegion is None else mixedTypeRNA_targetRegion)
mixedTypeRNA_reference = planned_experiment.get_mixedType_rna_library()
reference_step_data.savedFields[
ReferenceFieldNames.MIXED_TYPE_RNA_REFERENCE
] = ("" if mixedTypeRNA_reference is None else mixedTypeRNA_reference)
mixedTypeRNA_hotSpot = planned_experiment.get_mixedType_rna_regionfile()
reference_step_data.savedFields[
ReferenceFieldNames.MIXED_TYPE_RNA_HOT_SPOT_BED_FILE
] = ("" if mixedTypeRNA_hotSpot is None else mixedTypeRNA_hotSpot)
reference_step_data.savedFields[
ReferenceFieldNames.SAME_REF_INFO_PER_SAMPLE
] = self._getIsSameRefInfoPerSample(step_helper, planned_experiment)
logger.debug(
"step_helper_db_loader._updateUniversalStep_referenceData() REFERENCE savedFields=%s"
% (reference_step_data.savedFields)
)
logger.debug(
"step_helper_db_loader._updateUniversalStep_referenceData() REFERENCE appl_product=%s"
% (appl_product)
)
reference_step_data.prepopulatedFields[
ReferenceFieldNames.SHOW_HOT_SPOT_BED
] = True
if appl_product and not appl_product.isHotspotRegionBEDFileSuppported:
reference_step_data.prepopulatedFields[
ReferenceFieldNames.SHOW_HOT_SPOT_BED
] = False
# if the plan or template has pre-selected reference info, it is possible that it is not found in db in this TS instance
# a plan's or template's pre-selected reference info trumps applProducts default selection values!
if reference_step_data.savedFields[ReferenceFieldNames.REFERENCE]:
reference_step_data.prepopulatedFields[
ReferenceFieldNames.REFERENCE_MISSING
] = True
if reference_step_data.savedFields[ReferenceFieldNames.REFERENCE] in [
ref.short_name
for ref in reference_step_data.prepopulatedFields[
ReferenceFieldNames.REFERENCES
]
]:
reference_step_data.prepopulatedFields[
ReferenceFieldNames.REFERENCE_MISSING
] = False
else:
logger.debug(
"at step_helper_db_loader.updateUniversalStepHelper() REFERENCE_MISSING saved reference=%s"
% (reference_step_data.savedFields[ReferenceFieldNames.REFERENCE])
)
else:
reference_step_data.prepopulatedFields[
ReferenceFieldNames.REFERENCE_MISSING
] = False
stepHelper_type = step_helper.sh_type
logger.debug(
"step_helper_db_loader._updateUniversalStep_referenceData() stepHelper_type=%s; reference_step_data.savedFields=%s"
% (stepHelper_type, reference_step_data.savedFields)
)
if (
stepHelper_type == StepHelperType.CREATE_NEW_PLAN_BY_SAMPLE
or stepHelper_type == StepHelperType.EDIT_PLAN_BY_SAMPLE
or stepHelper_type == StepHelperType.COPY_PLAN_BY_SAMPLE
):
barcoding_step = step_helper.steps[StepNames.BARCODE_BY_SAMPLE]
save_plan_step = step_helper.steps[StepNames.SAVE_PLAN_BY_SAMPLE]
barcoding_step.prepopulatedFields[
SavePlanFieldNames.PLAN_REFERENCE
] = reference_step_data.savedFields.get(ReferenceFieldNames.REFERENCE, "")
barcoding_step.prepopulatedFields[
SavePlanFieldNames.PLAN_TARGET_REGION_BED_FILE
] = reference_step_data.savedFields.get(
ReferenceFieldNames.TARGET_BED_FILE, ""
)
barcoding_step.prepopulatedFields[
SavePlanFieldNames.PLAN_HOTSPOT_REGION_BED_FILE
] = reference_step_data.savedFields.get(
ReferenceFieldNames.HOT_SPOT_BED_FILE, ""
)
# logger.debug("step_helper_db_loader._updateUniversalStep_referenceData() stepHelper_type=%s; barcoding_step.savedFields=%s" %(stepHelper_type, barcoding_step.savedFields))
# logger.debug("step_helper_db_loader._updateUniversalStep_referenceData() stepHelper_type=%s; step_helper=%s; barcoding_step=%s" %(stepHelper_type, step_helper, barcoding_step))
save_plan_step.prepopulatedFields[
SavePlanFieldNames.PLAN_REFERENCE
] = reference_step_data.savedFields.get(ReferenceFieldNames.REFERENCE, "")
save_plan_step.prepopulatedFields[
SavePlanFieldNames.PLAN_TARGET_REGION_BED_FILE
] = reference_step_data.savedFields.get(
ReferenceFieldNames.TARGET_BED_FILE, ""
)
save_plan_step.prepopulatedFields[
SavePlanFieldNames.PLAN_HOTSPOT_REGION_BED_FILE
] = reference_step_data.savedFields.get(
ReferenceFieldNames.HOT_SPOT_BED_FILE, ""
)
barcoding_step.savedObjects[
SavePlanFieldNames.REFERENCE_STEP_HELPER
] = reference_step_data
save_plan_step.savedObjects[
SavePlanFieldNames.REFERENCE_STEP_HELPER
] = reference_step_data
elif (
stepHelper_type == StepHelperType.CREATE_NEW_PLAN
or stepHelper_type == StepHelperType.COPY_PLAN
or stepHelper_type == StepHelperType.EDIT_PLAN
or stepHelper_type == StepHelperType.EDIT_RUN
):
save_plan_step_data = step_helper.steps[StepNames.SAVE_PLAN]
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.PLAN_REFERENCE
] = reference_step_data.savedFields.get(ReferenceFieldNames.REFERENCE, "")
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.PLAN_TARGET_REGION_BED_FILE
] = reference_step_data.savedFields.get(
ReferenceFieldNames.TARGET_BED_FILE, ""
)
save_plan_step_data.prepopulatedFields[
SavePlanFieldNames.PLAN_HOTSPOT_REGION_BED_FILE
] = reference_step_data.savedFields.get(
ReferenceFieldNames.HOT_SPOT_BED_FILE, ""
)
save_plan_step_data.savedObjects[
SavePlanFieldNames.REFERENCE_STEP_HELPER
] = reference_step_data
def _updateUniversalStep_referenceData_for_edit(
self,
step_helper,
planned_experiment,
appl_product,
application_step_data,
reference_step_data,
):
# logger.debug("_updateUniversalStep_referenceData_for_edit appl_product=%s" %(appl_product))
reference_step_data.prepopulatedFields[
ReferenceFieldNames.SHOW_HOT_SPOT_BED
] = True
reference_step_data.prepopulatedFields[
ReferenceFieldNames.REQUIRE_TARGET_BED_FILE
] = False
if appl_product:
reference_step_data.prepopulatedFields[
ReferenceFieldNames.SHOW_HOT_SPOT_BED
] = appl_product.isHotspotRegionBEDFileSuppported
reference_step_data.prepopulatedFields[
ReferenceFieldNames.REQUIRE_TARGET_BED_FILE
] = appl_product.isTargetRegionBEDFileSelectionRequiredForRefSelection
reference_step_data.prepopulatedFields[
ReferenceFieldNames.PLAN_STATUS
] = planned_experiment.planStatus
def _update_barcode_sets_for_edit(
self, step_helper, barcodeSet, endBarcodeSet, update_step_data
):
appl_product = step_helper.getApplProduct()
update_step_data.savedObjects[SavePlanFieldNames.APPL_PRODUCT] = appl_product
update_step_data.savedFields[SavePlanFieldNames.BARCODE_SET] = barcodeSet
if barcodeSet:
barcodeSets, all_barcodes = self._get_all_barcodeSets_n_barcodes_for_selection(
barcodeSet
)
update_step_data.prepopulatedFields[
SavePlanFieldNames.BARCODE_SETS
] = barcodeSets
update_step_data.prepopulatedFields[
SavePlanFieldNames.BARCODE_SETS_BARCODES
] = json.dumps(all_barcodes)
update_step_data.savedFields[SavePlanFieldNames.END_BARCODE_SET] = endBarcodeSet
if endBarcodeSet:
barcodeSets, all_barcodes = self._get_all_barcodeSets_n_barcodes_for_selection(
endBarcodeSet
)
update_step_data.prepopulatedFields[
SavePlanFieldNames.END_BARCODE_SETS
] = barcodeSets
update_step_data.prepopulatedFields[
SavePlanFieldNames.END_BARCODE_SETS_BARCODES
] = json.dumps(all_barcodes)
def _updateUniversalStep_analysisParamsData(
self,
step_helper,
planned_experiment,
appl_product,
application_step_data,
kits_step_data,
analysisParams_step_data,
):
self._updateUniversalStep_analysisParamsData_basic(
step_helper,
planned_experiment,
appl_product,
application_step_data,
kits_step_data,
analysisParams_step_data,
)
# current chip selection
chipType = planned_experiment.get_chipType()
if Chip.objects.filter(name=chipType).count() == 0:
chipType = chipType[:3]
# there is no analysisArgs db definition for 318v2
analysisParams_step_data.prepopulatedFields[
AnalysisParamsFieldNames.CHIP_TYPE
] = ("318" if chipType == "318v2" else chipType)
logger.debug(
"step_helper_db_loader._updateUniversalStep_analysisParamsData() chipType=%s;"
% (chipType)
)
applicationGroupName = (
planned_experiment.applicationGroup.name
if planned_experiment.applicationGroup
else ""
)
best_match_entry = AnalysisArgs.best_match(
chipType,
planned_experiment.get_sequencekitname(),
planned_experiment.templatingKitName,
planned_experiment.get_librarykitname(),
planned_experiment.samplePrepKitName,
None,
planned_experiment.runType,
applicationGroupName,
planned_experiment.categories,
)
# system templates may not have any analysis args pre-selected
doesPlanHaveCustomAnalysisArgs = self._doesPlanHaveCustomAnalysisArgs(
planned_experiment
)
if doesPlanHaveCustomAnalysisArgs or not best_match_entry:
current_selected_analysisArgs = (
planned_experiment.latest_eas.get_cmdline_args()
)
else:
current_selected_analysisArgs = best_match_entry.get_args()
current_selected_analysisArgs.update(
{
"description": AnalysisParamsFieldNames.AP_ENTRY_SELECTED_VALUE,
"name": "",
"custom_args": doesPlanHaveCustomAnalysisArgs,
}
)
self._updateUniversalStep_analysisParamsData_currentSelection(
analysisParams_step_data, current_selected_analysisArgs
)
analysisParams_step_data.savedFields[AnalysisParamsFieldNames.AP_CUSTOM] = (
"True" if doesPlanHaveCustomAnalysisArgs else "False"
)
def _doesPlanHaveCustomAnalysisArgs(self, planned_experiment):
latest_eas = planned_experiment.latest_eas
if latest_eas and latest_eas.custom_args and latest_eas.have_args():
return True
else:
return False
def _updateUniversalStep_analysisParamsData_currentSelection(
self, analysisParams_step_data, current_selected_analysisArgs
):
analysisParams_step_data.savedObjects[
AnalysisParamsFieldNames.AP_ENTRY_SELECTED
] = current_selected_analysisArgs
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_BEADFIND_SELECTED
] = current_selected_analysisArgs["beadfindargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_ANALYSISARGS_SELECTED
] = current_selected_analysisArgs["analysisargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_PREBASECALLER_SELECTED
] = current_selected_analysisArgs["prebasecallerargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_CALIBRATE_SELECTED
] = current_selected_analysisArgs["calibrateargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_BASECALLER_SELECTED
] = current_selected_analysisArgs["basecallerargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_ALIGNMENT_SELECTED
] = current_selected_analysisArgs["alignmentargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_IONSTATS_SELECTED
] = current_selected_analysisArgs["ionstatsargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_BEADFIND_SELECTED
] = current_selected_analysisArgs["thumbnailbeadfindargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_ANALYSISARGS_SELECTED
] = current_selected_analysisArgs["thumbnailanalysisargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_PREBASECALLER_SELECTED
] = current_selected_analysisArgs["prethumbnailbasecallerargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_CALIBRATE_SELECTED
] = current_selected_analysisArgs["thumbnailcalibrateargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_BASECALLER_SELECTED
] = current_selected_analysisArgs["thumbnailbasecallerargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_ALIGNMENT_SELECTED
] = current_selected_analysisArgs["thumbnailalignmentargs"]
analysisParams_step_data.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_IONSTATS_SELECTED
] = current_selected_analysisArgs["thumbnailionstatsargs"]
def _updateUniversalStep_analysisParamsData_basic(
self,
step_helper,
planned_experiment,
appl_product,
application_step_data,
kits_step_data,
analysisParams_step_data,
):
# current chip selection
chipType = planned_experiment.get_chipType()
# current runType
runType = planned_experiment.runType
# current application group
applicationGroup = planned_experiment.applicationGroup
applicationGroupName = applicationGroup.name if applicationGroup else ""
# return a list of entries
possible_match_entries = AnalysisArgs.possible_matches(
chipType,
planned_experiment.get_sequencekitname(),
planned_experiment.templatingKitName,
planned_experiment.get_librarykitname(),
planned_experiment.samplePrepKitName,
None,
runType,
applicationGroupName,
planned_experiment.categories,
)
best_match_entry = planned_experiment.get_default_cmdline_args_obj()
for ap in possible_match_entries:
if ap.name == best_match_entry.name:
ap.name = AnalysisParamsFieldNames.AP_ENTRY_BEST_MATCH_PLAN_VALUE
ap.best_match = True
logger.debug(
"step_helper_db_loader._updateUniversalStep_analysisParamsData_basic() ANALYSIS_PARAMS possible_match_entries=%s"
% (possible_match_entries)
)
analysisParams_step_data.prepopulatedFields[
AnalysisParamsFieldNames.AP_ENTRIES
] = possible_match_entries
analysisParams_step_data.prepopulatedFields[
AnalysisParamsFieldNames.AP_DISPLAYED_NAMES
] = [ap.description for ap in possible_match_entries]
analysisParams_step_data.prepopulatedFields[
AnalysisParamsFieldNames.CATEGORIES
] = planned_experiment.categories
logger.debug(
"step_helper_db_loader._updateUniversalStep_analysisParamsData_basic() chipType=%s; runType=%s; applicationGroupName=%s"
% (chipType, runType, applicationGroupName)
)
def _getIsSameRefInfoPerSample(self, step_helper, planned_experiment):
stepHelper_type = step_helper.sh_type
if stepHelper_type in [
StepHelperType.EDIT_PLAN_BY_SAMPLE,
StepHelperType.COPY_PLAN_BY_SAMPLE,
StepHelperType.COPY_PLAN,
StepHelperType.EDIT_PLAN,
StepHelperType.EDIT_RUN,
]:
return planned_experiment.is_same_refInfo_as_defaults_per_sample()
else:
return True
def _updateUniversalStep_pluginData_ionreporterData(
self,
step_helper,
planned_experiment,
appl_product,
plugins_step_data,
ionreporter_step_data,
):
plugins_step_data.savedFields[StepNames.PLUGINS] = []
plugins = planned_experiment.get_selectedPlugins()
pluginIds = []
for plugin_name, plugin_dict in list(plugins.items()):
# find existing plugin by plugin_name (handles plugins that were reinstalled or uninstalled)
try:
plugin = Plugin.objects.filter(name=plugin_name, active=True)[0]
except Exception:
continue
# we now need to show all non-IRU export plugins on the Plugins chevron
if "ionreporter" in plugin_name.lower():
# if PluginFieldNames.EXPORT in plugin.pluginsettings.get(PluginFieldNames.FEATURES,[]):
if not step_helper.isPlanBySample():
# ionreporter_step_data.savedFields[IonReporterFieldNames.UPLOADERS].append(plugin.id)
pass
else:
pluginIds.append(plugin.id)
plugins_step_data.savedFields[
PluginFieldNames.PLUGIN_CONFIG % plugin.id
] = json.dumps(
plugin_dict.get(PluginFieldNames.USER_INPUT, ""),
cls=JSONEncoder,
separators=(",", ":"),
)
if "accountId" in plugin_dict:
ionreporter_step_data.savedFields[
IonReporterFieldNames.IR_ACCOUNT_ID
] = plugin_dict.get("accountId")
ionreporter_step_data.savedFields[
IonReporterFieldNames.IR_ACCOUNT_NAME
] = plugin_dict.get("accountName")
ionreporter_step_data.savedFields[
IonReporterFieldNames.IR_VERSION
] = plugin_dict.get("version")
ionreporter_step_data.savedFields[
IonReporterFieldNames.IRU_UPLOAD_MODE
] = plugin_dict[PluginFieldNames.USER_INPUT].get(
"iru_qc_option", "no_check"
)
elif (
PluginFieldNames.USER_INPUT in plugin_dict
and "accountId" in plugin_dict[PluginFieldNames.USER_INPUT]
):
ionreporter_step_data.savedFields[
IonReporterFieldNames.IR_ACCOUNT_ID
] = plugin_dict[PluginFieldNames.USER_INPUT].get("accountId")
ionreporter_step_data.savedFields[
IonReporterFieldNames.IR_ACCOUNT_NAME
] = plugin_dict[PluginFieldNames.USER_INPUT].get("accountName")
ionreporter_step_data.savedFields[
IonReporterFieldNames.IRU_UPLOAD_MODE
] = plugin_dict[PluginFieldNames.USER_INPUT].get("iru_qc_option")
if "userconfigs" in plugin.config:
if "ionadmin" in plugin.config.get("userconfigs"):
_list = plugin.config.get("userconfigs").get("ionadmin")
for l in _list:
if (
l.get("id")
== ionreporter_step_data.savedFields[
IonReporterFieldNames.IR_ACCOUNT_ID
]
):
ionreporter_step_data.savedFields[
IonReporterFieldNames.IR_VERSION
] = l.get("version")
# tag_isFactoryProvidedWorkflow is stored in userInputInfo list
for info in plugin_dict[PluginFieldNames.USER_INPUT].get(
"userInputInfo", []
):
if info["Workflow"] == planned_experiment.irworkflow:
step_helper.steps[StepNames.IONREPORTER].savedFields[
IonReporterFieldNames.IR_ISFACTORY
] = info.get("tag_isFactoryProvidedWorkflow")
break
if "IonReporterUploader" not in plugins:
ionreporter_step_data.savedFields[IonReporterFieldNames.IR_ACCOUNT_ID] = "0"
ionreporter_step_data.savedFields[
IonReporterFieldNames.IR_ACCOUNT_NAME
] = "None"
step_helper.steps[StepNames.IONREPORTER].savedFields[
IonReporterFieldNames.IR_WORKFLOW
] = planned_experiment.irworkflow
plugins_step_data.savedFields[PluginFieldNames.PLUGIN_IDS] = ", ".join(
str(v) for v in pluginIds
)
plugins_step_data.updateSavedObjectsFromSavedFields()
def get_ir_fields_dict_from_user_input_info(
self, user_input_info, sample_name, index
):
# logger.debug("ENTER step_helper_db_loader.get_ir_fields_dict_from_user_input_info()")
if sample_name == "barcoded--Sample":
if index >= len(list(user_input_info[0].keys())):
return dict(
sample="",
sampleDescription="",
sampleExternalId="",
barcodeId="",
SampleCollectionDate=None,
SampleReceiptDate=None,
Gender=None,
Population=None,
RelationRole=None,
Workflow=None,
mouseStrains=None,
setid=None,
cancerType=None,
cellularityPct=None,
biopsyDays=None,
cellNum=None,
coupleID=None,
embryoID=None,
)
sample_name = list(user_input_info[0].keys())[index]
# do not re-invent what has already been persisted in the JSON blob!
barcodeSampleInfo = (
user_input_info[0].get(sample_name).get("barcodeSampleInfo", {})
)
barcode_id_strs = sorted(
user_input_info[0].get(sample_name).get("barcodeSampleInfo").keys()
)
barcode_id_str = list(
user_input_info[0].get(sample_name).get("barcodeSampleInfo").keys()
)[0]
sampleDescription = (
user_input_info[0]
.get(sample_name)
.get("barcodeSampleInfo")
.get(barcode_id_str)
.get("description")
)
externalId = (
user_input_info[0]
.get(sample_name)
.get("barcodeSampleInfo")
.get(barcode_id_str)
.get("externalId")
)
return dict(
sample=sample_name,
sampleDescription=sampleDescription,
sampleExternalId=externalId,
barcodeSampleInfo=barcodeSampleInfo,
barcode_id_strs=barcode_id_strs,
SampleCollectionDate=None,
SampleReceiptDate=None,
Gender=None,
Population=None,
RelationRole=None,
Workflow=None,
mouseStrains=None,
setid=None,
cancerType=None,
cellularityPct=None,
biopsyDays=None,
cellNum=None,
coupleID=None,
embryoID=None,
)
else:
return user_input_info[index]
def updatePlanBySampleSpecificStepHelper(
self, step_helper, planned_experiment, sampleset_id=None
):
"""
"""
# logger.debug("ENTER step_helper_db_loader.updatePlanBySampleSpecificStepHelper() planned_experiment.id=%d; step_helper=%s" %(planned_experiment.id, step_helper))
barcoding_step = step_helper.steps[StepNames.BARCODE_BY_SAMPLE]
save_plan_step = step_helper.steps[StepNames.SAVE_PLAN_BY_SAMPLE]
planDisplayedName = getPlanDisplayedName(planned_experiment)
if step_helper.isCopy():
save_plan_step.savedFields[SavePlanBySampleFieldNames.TEMPLATE_NAME] = (
"Copy of " + planDisplayedName
)
else:
save_plan_step.savedFields[
SavePlanBySampleFieldNames.TEMPLATE_NAME
] = planDisplayedName
existing_plan = step_helper.isEdit() or step_helper.isCopy()
barcoding_step.prepopulatedFields[
SavePlanFieldNames.RUN_TYPE
] = planned_experiment.runType
save_plan_step.prepopulatedFields[
SavePlanFieldNames.RUN_TYPE
] = planned_experiment.runType
isOncoSameSample = False
if (
RunType.is_dna_rna(planned_experiment.runType)
and planned_experiment.runType != "MIXED"
):
if existing_plan:
sample_count = planned_experiment.get_sample_count()
barcode_count = getPlanBarcodeCount(planned_experiment)
isOncoSameSample = sample_count * 2 == barcode_count
barcoding_step.savedFields[
BarcodeBySampleFieldNames.ONCO_SAME_SAMPLE
] = isOncoSameSample
save_plan_step.savedFields[
SavePlanFieldNames.ONCO_SAME_SAMPLE
] = isOncoSameSample
if sampleset_id:
samplesets = SampleSet.objects.filter(pk__in=sampleset_id.split(","))
if samplesets[0].SampleGroupType_CV:
step_helper.steps[StepNames.APPLICATION].savedFields[
ApplicationFieldNames.SAMPLE_GROUPING
] = samplesets[0].SampleGroupType_CV.pk
else:
samplesets = planned_experiment.sampleSets.all()
save_plan_step.savedObjects[SavePlanBySampleFieldNames.SAMPLESET] = samplesets
sorted_sampleSetItems = []
for sampleset in samplesets:
sorted_sampleSetItems.extend(
list(
sampleset.samples.all().order_by(
"relationshipGroup", "nucleotideType", "sample__displayedName"
)
)
)
barcoding_step.prepopulatedFields[
BarcodeBySampleFieldNames.SAMPLESET_ITEMS
] = sorted_sampleSetItems
barcoding_step.prepopulatedFields[
BarcodeBySampleFieldNames.SHOW_SAMPLESET_INFO
] = (len(samplesets) > 1)
barcoding_step.savedFields[
SavePlanFieldNames.BARCODE_SAMPLE_TUBE_LABEL
] = planned_experiment.sampleTubeLabel
save_plan_step.savedFields[
SavePlanFieldNames.BARCODE_SAMPLE_TUBE_LABEL
] = planned_experiment.sampleTubeLabel
barcoding_step.savedFields[
SavePlanFieldNames.CHIP_BARCODE_LABEL
] = planned_experiment.get_chipBarcode()
save_plan_step.savedFields[
SavePlanFieldNames.CHIP_BARCODE_LABEL
] = planned_experiment.get_chipBarcode()
save_plan_step.savedFields[
SavePlanFieldNames.NOTE
] = planned_experiment.get_notes()
LIMS_meta = planned_experiment.get_LIMS_meta()
if type(LIMS_meta) is list:
# convert list to string
save_plan_step.savedFields[SavePlanFieldNames.LIMS_META] = "".join(
LIMS_meta
)
else:
save_plan_step.savedFields[SavePlanFieldNames.LIMS_META] = LIMS_meta
save_plan_step.savedObjects[SavePlanFieldNames.META] = self._metaDataFromPlan(
step_helper, planned_experiment
)
# Pick barcode set to use:
# 1. Edit/Copy - get from plan
# 2. Create - get from sampleSetItems or, if none, the barcode set selected in the plan template
barcodeSet = planned_experiment.get_barcodeId()
endBarcodeSet = planned_experiment.get_endBarcodeKitName()
if not existing_plan:
for item in sorted_sampleSetItems:
if item.dnabarcode:
barcodeSet = item.dnabarcode.name
break
if item.endDnabarcode:
endBarcodeSet = item.endDnabarcode.name
break
barcoding_step.savedFields[SavePlanFieldNames.BARCODE_SET] = step_helper.steps[
StepNames.KITS
].savedFields[KitsFieldNames.BARCODE_ID] = barcodeSet
self._update_barcode_sets_for_edit(
step_helper, barcodeSet, endBarcodeSet, barcoding_step
)
# IonReporter parameters
irInfo = self._getIRinfo(planned_experiment)
if irInfo:
barcoding_step.prepopulatedFields[SavePlanFieldNames.SELECTED_IR] = irInfo[
"selectedIr"
]
barcoding_step.prepopulatedFields[
SavePlanFieldNames.SETID_SUFFIX
] = irInfo.get("setid_suffix")
userInputInfo = irInfo.get("userInputInfo", [])
iru_hasOncoData = False
iru_hasPgsData = False
if userInputInfo:
for info in userInputInfo:
if info.get("cancerType", "") or info.get("cellularityPct", ""):
iru_hasOncoData = True
if (
info.get("biopsyDays", "")
or info.get("coupleID", "")
or info.get("embryoID", "")
):
iru_hasPgsData = True
if planned_experiment.categories and (
"Oncomine" in planned_experiment.categories
or "Onconet" in planned_experiment.categories
):
iru_hasOncoData = True
barcoding_step.prepopulatedFields[
BarcodeBySampleFieldNames.HAS_ONCO_DATA
] = iru_hasOncoData
barcoding_step.prepopulatedFields[
BarcodeBySampleFieldNames.HAS_PGS_DATA
] = iru_hasPgsData
# TODO if irInfo is missing or this is a new plan creation, do that following (template could have IR pre-selected already!!!)
if (
barcoding_step.sh_type == StepHelperType.CREATE_NEW_PLAN_BY_SAMPLE
or not irInfo
):
sampleSetItem_hasPgsData = False
sampleSetItem_hasOncoData = False
for item in sorted_sampleSetItems:
if item.cancerType or item.cellularityPct:
sampleSetItem_hasOncoData = True
if item.biopsyDays or item.coupleId or item.embryoId:
sampleSetItem_hasPgsData = True
barcoding_step.prepopulatedFields[
BarcodeBySampleFieldNames.HAS_ONCO_DATA
] = sampleSetItem_hasOncoData
barcoding_step.prepopulatedFields[
BarcodeBySampleFieldNames.HAS_PGS_DATA
] = sampleSetItem_hasPgsData
# Populate samples table
if existing_plan:
samplesTable = self._getSamplesTable_from_plan(
planned_experiment, step_helper, irInfo
)
else:
samplesTable = []
for item in sorted_sampleSetItems:
sampleDict = {
"barcodeId": item.dnabarcode.id_str if item.dnabarcode else "",
"endBarcodeId": item.endDnabarcode.id_str
if item.endDnabarcode
else "",
"sampleName": item.sample.displayedName,
"sampleExternalId": item.sample.externalId,
"sampleDescription": item.description,
"nucleotideType": item.get_nucleotideType_for_planning(),
"controlSequenceType": "",
"reference": "",
"targetRegionBedFile": "",
"hotSpotRegionBedFile": "",
"controlType": item.controlType,
"cancerType": "",
"cellularityPct": "",
"irSampleCollectionDate": str(item.sampleCollectionDate),
"irSampleReceiptDate": str(item.sampleReceiptDate),
"irWorkflow": planned_experiment.irworkflow,
"irGender": item.gender,
"irPopulation": item.population,
"irmouseStrains": item.mouseStrains,
"irRelationRole": item.relationshipRole,
"irSetID": item.relationshipGroup,
"ircancerType": item.cancerType,
"ircellularityPct": item.cellularityPct,
"biopsyDays": "",
"cellNum": "",
"coupleID": "",
"embryoID": "",
"irbiopsyDays": item.biopsyDays,
"ircellNum": item.cellNum,
"ircoupleID": item.coupleId,
"irembryoID": item.embryoId,
}
# logger.debug("step_helper_db_loader.updatePlanBySampleSpecificStepHelper() sampleDict=%s" %(sampleDict))
samplesTable.append(sampleDict)
if samplesTable:
barcoding_step.savedObjects[
SavePlanFieldNames.SAMPLES_TABLE_LIST
] = samplesTable
barcoding_step.savedFields[SavePlanFieldNames.SAMPLES_TABLE] = json.dumps(
samplesTable
)
num_samples = len(samplesTable)
if step_helper.isCreate():
if step_helper.isDualNucleotideTypeBySample() and num_samples < 2:
num_samples = 2
barcoding_step.savedFields[
BarcodeBySampleFieldNames.ONCO_SAME_SAMPLE
] = True
barcoding_step.prepopulatedFields[SavePlanFieldNames.NUM_SAMPLES] = num_samples
def _get_all_barcodeSets_n_barcodes_for_selection(self, barcodeSet):
"""
        retrieve all active barcode items, plus the items for the input barcodeSet regardless of whether it is active
        return a list of barcodeSet names and a dict of barcodes (name, id_str, sequence) keyed by barcodeSet name
"""
available_dnaBarcodes = dnaBarcode.objects.filter(
Q(active=True) | Q(name=barcodeSet)
)
barcodeSets = list(
available_dnaBarcodes.values_list("name", flat=True)
.distinct()
.order_by("name")
)
all_barcodes = {}
for bc in available_dnaBarcodes.order_by("name", "index").values(
"name", "id_str", "sequence"
):
all_barcodes.setdefault(bc["name"], []).append(bc)
return barcodeSets, all_barcodes
def _getIRinfo(self, planned_experiment):
# logger.debug("ENTER step_helper_db_loader._getIRinfo()")
# get IonReporterUploader parameters, if any
for plugin_name, plugin_dict in list(
planned_experiment.get_selectedPlugins().items()
):
if "IonReporter" in plugin_name:
try:
plugin = Plugin.objects.filter(name=plugin_name, active=True)[0]
except Exception:
continue
irInfo = {
"selectedIr": plugin,
"irConfigJson": json.dumps(plugin.userinputfields),
"userInputInfo": None,
}
if PluginFieldNames.USER_INPUT in plugin_dict:
# Handle the old and the new style userinput in the plugin dictionary
if isinstance(plugin_dict[PluginFieldNames.USER_INPUT], dict):
userInputInfo = plugin_dict[PluginFieldNames.USER_INPUT].get(
"userInputInfo", []
)
if userInputInfo and len(userInputInfo) > 0:
irInfo["userInputInfo"] = userInputInfo
irInfo["setid_suffix"] = userInputInfo[0]["setid"][
userInputInfo[0]["setid"].find("__") :
]
elif (
isinstance(plugin_dict[PluginFieldNames.USER_INPUT], list)
and len(plugin_dict[PluginFieldNames.USER_INPUT]) > 0
):
irInfo["userInputInfo"] = plugin_dict[
PluginFieldNames.USER_INPUT
]
return irInfo
return None
def _getEndBarcode_for_matching_startBarcode(self, dualBarcodes, startBarcode):
"""
        return the endBarcode whose startBarcode matches in a list of barcode pairs
        dualBarcodes is a list of dual barcodes in the form startBarcode--endBarcode,
        e.g., IonXpress_015--IonSet1_15
"""
if not startBarcode or not dualBarcodes:
return ""
for dualBarcode in dualBarcodes:
dualBarcodeTokens = dualBarcode.split(
PlannedExperiment.get_dualBarcodes_delimiter()
)
if len(dualBarcodeTokens) == 2:
# startBarcode
if dualBarcodeTokens[0] == startBarcode:
return dualBarcodeTokens[1]
return ""
def _getSamplesTable_from_plan(self, planned_experiment, step_helper, irInfo=None):
# logger.debug("ENTER step_helper_db_loader._getSamplesTable_from_plan() with step_helper.")
samplesTable = []
planNucleotideType = planned_experiment.get_default_nucleotideType()
runType = planned_experiment.runType
if step_helper.isBarcoded():
# build samples table from barcodedSamples
sample_to_barcode = planned_experiment.get_barcodedSamples()
barcodeSet = planned_experiment.get_barcodeId()
barcode_order = list(
dnaBarcode.objects.filter(name=barcodeSet)
.order_by("index")
.values_list("id_str", flat=True)
)
endBarcodeSet = planned_experiment.get_endBarcodeKitName()
multibarcode_samples = False
# WORKAROUND FOR HUB: plan from HUB can have barcodeKit selected but with empty barcodedSamples JSON blob
application_group_name = (
""
if not planned_experiment.applicationGroup
else planned_experiment.applicationGroup.name
)
# logger.debug("step_helper_db_loader._getSamplesTable_from_plan() application_group_name=%s" %(application_group_name))
if not sample_to_barcode:
# logger.debug("step_helper_db_loader._getSamplesTable_from_plan()")
sampleInfo = None
experiment = planned_experiment.experiment
latest_eas = planned_experiment.latestEAS
if experiment and experiment.samples.count() > 0:
sampleInfo = list(experiment.samples.values())[0]
sampleDict = {
"barcodeId": "",
"endBarcodeId": "",
"sampleName": sampleInfo["displayedName"] if sampleInfo else "",
"sampleExternalId": sampleInfo[SavePlanFieldNames.EXTERNAL_ID]
if sampleInfo
else "",
"sampleDescription": sampleInfo[SavePlanFieldNames.DESCRIPTION]
if sampleInfo
else "",
"nucleotideType": planNucleotideType,
"controlSequenceType": sampleInfo.get(
SavePlanFieldNames.BARCODE_SAMPLE_CONTROL_SEQ_TYPE, ""
)
if sampleInfo
else None,
"reference": planned_experiment.get_library()
if planned_experiment.get_library()
else "",
"hotSpotRegionBedFile": planned_experiment.get_regionfile()
if planned_experiment.get_regionfile()
else "",
"targetRegionBedFile": planned_experiment.get_bedfile()
if planned_experiment.get_bedfile()
else "",
"orderKey": format(1, "05d"),
}
samplesTable.append(sampleDict)
logger.debug(
"step_helper_db_loader._getSamplesTable_from_plan() NO existing barcodedSamples for plan.pk=%d; planName=%s; sampleDict=%s"
% (
planned_experiment.id,
planned_experiment.planDisplayedName,
sampleDict,
)
)
else:
for sample, value in list(sample_to_barcode.items()):
dualBarcodes = []
if "dualBarcodes" in value:
dualBarcodes = value[SavePlanFieldNames.DUAL_BARCODES_DB_KEY]
if "barcodeSampleInfo" in value:
multibarcode_samples = len(value["barcodeSampleInfo"]) > 1
for barcode, sampleInfo in list(
value["barcodeSampleInfo"].items()
):
sampleReference = sampleInfo.get(
SavePlanFieldNames.BARCODE_SAMPLE_REFERENCE, ""
)
sampleHotSpotRegionBedFile = sampleInfo.get(
SavePlanFieldNames.BARCODE_SAMPLE_HOTSPOT_REGION_BED_FILE,
"",
)
sampleTargetRegionBedFile = sampleInfo.get(
SavePlanFieldNames.BARCODE_SAMPLE_TARGET_REGION_BED_FILE,
"",
)
if not RunType.is_dna_rna(runType):
if (
not sampleReference
and not step_helper.isReferenceBySample()
):
if not sampleReference:
sampleReference = (
planned_experiment.get_library()
)
if not sampleHotSpotRegionBedFile:
sampleHotSpotRegionBedFile = (
planned_experiment.get_regionfile()
)
if not sampleTargetRegionBedFile:
sampleTargetRegionBedFile = (
planned_experiment.get_bedfile()
)
endBarcode = self._getEndBarcode_for_matching_startBarcode(
dualBarcodes, barcode
)
order_counter = (
barcode_order.index(barcode) + 1
if barcode in barcode_order
else 0
)
sampleDict = {
"barcodeId": barcode,
"endBarcodeId": endBarcode,
"sampleName": sample,
"sampleExternalId": sampleInfo.get(
SavePlanFieldNames.EXTERNAL_ID, ""
),
"sampleDescription": sampleInfo.get(
SavePlanFieldNames.DESCRIPTION, ""
),
"nucleotideType": sampleInfo.get(
SavePlanFieldNames.BARCODE_SAMPLE_NUCLEOTIDE_TYPE,
planNucleotideType,
),
"controlSequenceType": sampleInfo.get(
SavePlanFieldNames.BARCODE_SAMPLE_CONTROL_SEQ_TYPE,
"",
),
"reference": sampleReference,
"hotSpotRegionBedFile": sampleHotSpotRegionBedFile,
"targetRegionBedFile": sampleTargetRegionBedFile,
"controlType": sampleInfo.get(
SavePlanFieldNames.BARCODE_SAMPLE_CONTROL_TYPE, ""
),
"orderKey": format(order_counter, "05d"),
}
samplesTable.append(sampleDict)
# logger.debug("step_helper_db_loader._getSamplesTable_from_plan() barcodeSampleInfo plan.pk=%d; planName=%s; sampleName=%s; sampleDict=%s" % (planned_experiment.id, planned_experiment.planDisplayedName, sample, sampleDict))
else:
multibarcode_samples = len(value.get("barcodes", [])) > 1
for barcode in value.get("barcodes", []):
order_counter = (
barcode_order.index(barcode) + 1
if barcode in barcode_order
else 0
)
endBarcode = self._getEndBarcode_for_matching_startBarcode(
dualBarcodes, barcode
)
sampleDict = {
"barcodeId": barcode,
"endBarcodeId": endBarcode,
"sampleName": sample,
"sampleExternalId": None,
"sampleDescription": None,
"nucleotideType": planNucleotideType,
"controlSequenceType": None,
"reference": planned_experiment.get_library(),
"hotSpotRegionBedFile": ""
if planNucleotideType == "RNA"
else planned_experiment.get_regionfile(),
"targetRegionBedFile": ""
if planNucleotideType == "RNA"
else planned_experiment.get_bedfile(),
"orderKey": format(order_counter, "05d"),
}
samplesTable.append(sampleDict)
# add IR values
if irInfo and irInfo["userInputInfo"]:
barcodeToIrValues = {}
for irvalues in irInfo["userInputInfo"]:
barcodeId = irvalues.get("barcodeId")
if barcodeId:
barcodeToIrValues[barcodeId] = irvalues
for sampleDict in samplesTable:
for irkey, irvalue in list(
barcodeToIrValues.get(sampleDict["barcodeId"], {}).items()
):
if irkey == "Relation":
sampleDict["irRelationshipType"] = irvalue
elif irkey == "setid":
setid = irvalue.split("__")[0]
sampleDict["irSetID"] = setid
if setid and setid.isdigit():
sampleDict["orderKey"] = "%05d_%s" % (
int(setid),
sampleDict["orderKey"],
)
else:
sampleDict["ir" + irkey] = irvalue
# sort barcoded samples table
samplesTable.sort(key=lambda item: item["orderKey"])
# if same sample for dual nuc type want to order by the DNA/RNA sample pair
if multibarcode_samples:
if (
RunType.is_dna_rna(planned_experiment.runType)
and planned_experiment.runType != "MIXED"
):
samplesTable.sort(
key=lambda item: (
item["sampleName"],
item[SavePlanFieldNames.BARCODE_SAMPLE_NUCLEOTIDE_TYPE],
)
)
else:
# when we load a non-barcoded run for editing/copying we know it will only have a single sample.
sampleTubeLabel = (
"" if step_helper.isCopy() else planned_experiment.sampleTubeLabel
)
if sampleTubeLabel is None:
sampleTubeLabel = ""
# when we load a non-barcoded run for editing/copying we know it will only have a single chip barcode.
chipBarcode = (
"" if step_helper.isCopy() else planned_experiment.get_chipBarcode()
)
if chipBarcode is None:
chipBarcode = ""
sampleDict = {
"sampleName": planned_experiment.get_sampleDisplayedName(),
"sampleExternalId": planned_experiment.get_sample_external_id(),
"sampleDescription": planned_experiment.get_sample_description(),
"tubeLabel": sampleTubeLabel,
"chipBarcode": chipBarcode,
"nucleotideType": planNucleotideType,
"orderKey": format(1, "05d"),
}
# add IR values
if irInfo and irInfo["userInputInfo"]:
for irkey, irvalue in list(irInfo["userInputInfo"][0].items()):
if irkey == "Relation":
sampleDict["irRelationshipType"] = irvalue
elif irkey == "setid":
sampleDict["irSetID"] = irvalue.split("__")[0]
else:
sampleDict["ir" + irkey] = irvalue
samplesTable = [sampleDict]
return samplesTable
def getStepHelperForTemplatePlannedExperiment(
self, pe_id, step_helper_type=StepHelperType.EDIT_TEMPLATE, sampleset_id=None
):
"""
Get a step helper from a template planned experiment.
"""
logger.debug(
"ENTER step_helper_db_loader.getStepHelperForTemplatePlannedExperiment() step_helper_type=%s; pe_id=%s"
% (step_helper_type, str(pe_id))
)
planned_experiment = PlannedExperiment.objects.get(pk=pe_id)
if not planned_experiment.isReusable:
raise ValueError(
validation.invalid_required_value_not_polymorphic_type_value(
PlanTemplate.verbose_name, "id", Plan.verbose_name, "id"
)
)
runType = planned_experiment.runType
if runType:
runTypeObjs = RunType.objects.filter(runType=runType)
            if runTypeObjs.count() > 0:  # count() is a queryset method; calling it yields the row count
# logger.debug("step_helper_db_loader.getStepHelperForTemplatePlannedExperiment() runType_id=%d" %(runTypeObjs[0].id))
step_helper = self.getStepHelperForTemplateRunType(
runTypeObjs[0].id, step_helper_type, pe_id
)
else:
step_helper = StepHelper(
sh_type=step_helper_type, previous_template_id=pe_id
)
else:
step_helper = StepHelper(
sh_type=step_helper_type, previous_template_id=pe_id
)
planDisplayedName = getPlanDisplayedName(planned_experiment)
step_helper.parentName = planDisplayedName
step_helper.isParentSystem = planned_experiment.isSystem
self.updateUniversalStepHelper(step_helper, planned_experiment)
if step_helper.isPlan() and step_helper.isPlanBySample():
self.updatePlanBySampleSpecificStepHelper(
step_helper, planned_experiment, sampleset_id
)
elif step_helper.isPlan():
self.updatePlanSpecificStepHelper(step_helper, planned_experiment, True)
else:
self.updateTemplateSpecificStepHelper(step_helper, planned_experiment)
self.generate_warnings(step_helper)
return step_helper
def getStepHelperForPlanPlannedExperiment(
self, pe_id, step_helper_type=StepHelperType.EDIT_PLAN
):
"""
Get a plan step helper from a plan planned experiment.
"""
logger.debug(
"ENTER step_helper_db_loader.getStepHelperForPlanPlannedExperiment() step_helper_type=%s; pe_id=%s"
% (step_helper_type, str(pe_id))
)
planned_experiment = PlannedExperiment.objects.get(pk=pe_id)
if step_helper_type == StepHelperType.EDIT_RUN:
step_helper = StepHelper(
sh_type=step_helper_type,
previous_plan_id=pe_id,
experiment_id=planned_experiment.experiment.id,
)
else:
step_helper = StepHelper(sh_type=step_helper_type, previous_plan_id=pe_id)
planDisplayedName = getPlanDisplayedName(planned_experiment)
step_helper.parentName = planDisplayedName
if planned_experiment.isReusable:
raise ValueError(
validation.invalid_required_value_not_polymorphic_type_value(
Plan.verbose_name, "id", PlanTemplate.verbose_name, "id"
)
)
step_helper.isParentSystem = planned_experiment.isSystem
self.updateUniversalStepHelper(step_helper, planned_experiment)
if step_helper.isPlan() and step_helper.isPlanBySample():
self.updatePlanBySampleSpecificStepHelper(step_helper, planned_experiment)
elif step_helper.isPlan():
self.updatePlanSpecificStepHelper(step_helper, planned_experiment)
else:
raise ValueError(
ugettext_lazy("workflow.messages.errors.internal.data.initialization")
) # "Cannot prepare data for planning in the plan wizard."
self.generate_warnings(step_helper)
return step_helper
def generate_warnings(self, step_helper):
""" add step warnings if any selections are obsolete """
if step_helper.isEditRun():
return
kits_step_data = step_helper.steps[StepNames.KITS]
check_kitInfo = [
(
ugettext_lazy("workflow.step.kits.fields.librarykitname.label"),
["LibraryKit", "LibraryPrepKit"],
kits_step_data.savedFields[KitsFieldNames.LIBRARY_KIT_NAME],
),
(
ugettext_lazy("workflow.step.kits.fields.templatekitname.label"),
["TemplatingKit", "IonChefPrepKit"],
kits_step_data.savedFields[KitsFieldNames.TEMPLATE_KIT_NAME],
),
(
ugettext_lazy("workflow.step.kits.fields.sequenceKit.label"),
["SequencingKit"],
kits_step_data.savedFields[KitsFieldNames.SEQUENCE_KIT_NAME],
),
(
ugettext_lazy("workflow.step.kits.fields.controlsequence.label"),
["ControlSequenceKit"],
kits_step_data.savedFields[KitsFieldNames.CONTROL_SEQUENCE],
),
(
ugettext_lazy("workflow.step.kits.fields.samplePreparationKit.label"),
["SamplePrepKit"],
kits_step_data.savedFields[KitsFieldNames.SAMPLE_PREPARATION_KIT],
),
]
for display, types, kit in check_kitInfo:
if kit:
qs = KitInfo.objects.filter(name=kit, kitType__in=types)
if qs:
if not qs[0].isActive:
kits_step_data.warnings.append(
validation.invalid_not_active(display, kit)
)
else:
kits_step_data.warnings.append(
validation.invalid_not_found_error(display, kit)
)
# barcode set
barcodeKit = kits_step_data.savedFields[KitsFieldNames.BARCODE_ID]
if barcodeKit:
qs = dnaBarcode.objects.filter(name=barcodeKit)
if qs:
if not qs.filter(active=True):
kits_step_data.warnings.append(
validation.invalid_not_active(
ugettext_lazy("workflow.step.kits.fields.barcodeId.label"),
barcodeKit,
)
)
else:
kits_step_data.warnings.append(
validation.invalid_not_found_error(
ugettext_lazy("workflow.step.kits.fields.barcodeId.label"),
barcodeKit,
)
)
# 20170928-TODO-WIP
# end barcode set
"""
barcodeKit = kits_step_data.savedFields[KitsFieldNames.END_BARCODE_ID]
if barcodeKit:
qs = dnaBarcode.objects.filter(name=barcodeKit)
if qs:
if not qs.filter(active=True):
kits_step_data.warnings.append(validation.invalid_not_active('Ending Barcode Set', barcodeKit)) # TODO: i18n post 5.8
else:
kits_step_data.warnings.append(validation.invalid_not_found_error('Ending Barcode Set', barcodeKit)) # TODO: i18n post 5.8
"""
# chip
chip = kits_step_data.savedFields[KitsFieldNames.CHIP_TYPE]
if chip:
qs = Chip.objects.filter(name=chip)
if qs:
if not qs.filter(isActive=True):
kits_step_data.warnings.append(
validation.invalid_not_active(
ugettext_lazy("workflow.step.kits.fields.chipType.label"),
chip,
)
)
else:
kits_step_data.warnings.append(
validation.invalid_not_found_error(
ugettext_lazy("workflow.step.kits.fields.chipType.label"), chip
)
)
def get_kit_advanced_settings(self, step_helper, planned_experiment=None):
"""
Attempt to get "recommended" parameters for Kits Chevron
1) if starting from System Template: use the System Template
2) if creating from runType: use step_helper parameters (this would've come from relevant applProduct)
3) if a plan/template previously created from System Template: use the System Template if application haven't changed
4) if a plan/template previously created NOT from System Template: don't have "recommended" parameters
"""
advanced_settings = {}
system_template = None
if planned_experiment:
# starting from existing Plan or Template
if planned_experiment.isSystem and planned_experiment.isReusable:
system_template = planned_experiment
elif (
planned_experiment.metaData
and planned_experiment.metaData.get("fromTemplateSource") == "ION"
):
try:
system_template = PlannedExperiment.objects.get(
planName=planned_experiment.metaData.get("fromTemplate")
)
if (
system_template.runType != planned_experiment.runType
or system_template.experiment.getPlatform
!= planned_experiment.experiment.getPlatform
):
system_template = None
except Exception:
pass
if system_template:
advanced_settings = {
KitsFieldNames.BASE_RECALIBRATE: system_template.get_base_recalibration_mode(),
KitsFieldNames.FLOW_ORDER: system_template.experiment.flowsInOrder,
KitsFieldNames.FORWARD_3_PRIME_ADAPTER: system_template.get_forward3primeadapter(),
KitsFieldNames.LIBRARY_KEY: system_template.get_libraryKey(),
KitsFieldNames.SAMPLE_PREP_PROTOCOL: system_template.samplePrepProtocol,
KitsFieldNames.TF_KEY: system_template.get_tfKey(),
}
else:
kits_step_data = step_helper.steps[StepNames.KITS]
advanced_settings = {
KitsFieldNames.BASE_RECALIBRATE: kits_step_data.savedFields[
KitsFieldNames.BASE_RECALIBRATE
],
KitsFieldNames.FLOW_ORDER: kits_step_data.savedFields[
KitsFieldNames.FLOW_ORDER
],
KitsFieldNames.FORWARD_3_PRIME_ADAPTER: kits_step_data.savedFields[
KitsFieldNames.FORWARD_3_PRIME_ADAPTER
],
KitsFieldNames.LIBRARY_KEY: kits_step_data.savedFields[
KitsFieldNames.LIBRARY_KEY
],
KitsFieldNames.SAMPLE_PREP_PROTOCOL: kits_step_data.savedFields[
KitsFieldNames.SAMPLE_PREP_PROTOCOL
],
KitsFieldNames.TF_KEY: kits_step_data.savedFields[
KitsFieldNames.TF_KEY
],
}
return advanced_settings
| 42.264978 | 252 | 0.608707 |
4a22a41a257577f1b9989bf9a03c9d815786513b | 390 | py | Python | thunau/wsgi.py | acdh-oeaw/thunau-old | a3023885470e80f7312e43561028398bffd713e0 | [
"MIT"
] | null | null | null | thunau/wsgi.py | acdh-oeaw/thunau-old | a3023885470e80f7312e43561028398bffd713e0 | [
"MIT"
] | 9 | 2020-02-12T00:19:18.000Z | 2021-12-13T19:46:51.000Z | thunau/wsgi.py | acdh-oeaw/thunau | 06e4d54f4553939ffba3c504088055c3807328c6 | [
"MIT"
] | null | null | null | """
WSGI config for thunau project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thunau.settings")
application = get_wsgi_application()
| 22.941176 | 78 | 0.784615 |
4a22a4b5600a80d54d1538c71500a5ab2d20c62f | 2,937 | py | Python | Model/solver.py | Spencerfar/djin-aging | f6513226e879e6061996d819b4de0e2873860fbc | [
"MIT"
] | 3 | 2021-08-24T08:33:45.000Z | 2022-01-18T23:50:33.000Z | Model/solver.py | Spencerfar/djin-aging | f6513226e879e6061996d819b4de0e2873860fbc | [
"MIT"
] | null | null | null | Model/solver.py | Spencerfar/djin-aging | f6513226e879e6061996d819b4de0e2873860fbc | [
"MIT"
] | 1 | 2021-08-24T08:34:30.000Z | 2021-08-24T08:34:30.000Z | import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
class SolveSDE(nn.Module):
def __init__(self, N, device, dt = 0.5, length = 25):
super(SolveSDE, self).__init__()
self.device = device
self.N = N
self.dt = dt
self.num_t = int(length/dt)
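    # Integration step used by _solve/_solve_prior below: an Euler-Maruyama update plus a
    # derivative-free Milstein-type correction, where x_tilde acts as an auxiliary predictor so the
    # diffusion derivative can be approximated via model.sigma_x(x_tilde) - sigma_x.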
def _solve(self, model, x0, t0, M, context, h, W):
X = torch.zeros((M, self.num_t, self.N)).to(self.device)
log_S = torch.zeros((M, self.num_t)).to(self.device)
log_Gammas = torch.zeros((M, self.num_t)).to(self.device)
sigma_xs = torch.zeros((M, self.num_t, self.N)).to(self.device)
drifts = torch.zeros((M, self.num_t, self.N)).to(self.device)
times = torch.zeros((M, self.num_t)).to(self.device)
X[:,0,:] = x0
times[:,0] = t0
log_Gammas[:,0] = -1e5
for i in range(1, self.num_t):
dx, log_dS, log_Gamma, h, sigma_x = model(X[:, i-1, :], h, times[:,i-1], context, W)
x_tilde = X[:, i-1, :] + self.dt*dx + sigma_x*np.sqrt(self.dt)
X[:, i, :] = X[:, i-1, :] + self.dt*dx + torch.randn_like(X[:,i-1,:])*sigma_x*np.sqrt(self.dt) + 0.5*(model.sigma_x(x_tilde) - sigma_x)*(self.dt*torch.randn_like(X[:,i-1,:]).pow(2) - self.dt)/np.sqrt(self.dt)
log_S[:, i] = log_S[:, i-1] + self.dt*log_dS
log_Gammas[:,i] = log_Gamma.reshape(M)
times[:,i] = times[:,i-1] + self.dt
sigma_xs[:, i] = sigma_x
drifts[:,i] = dx
sigma_xs[:,0] = sigma_xs[:,1]
return times, X, log_S, log_Gammas, sigma_xs, drifts
def _solve_prior(self, model, x0, t0, M, context, h, W):
X = torch.zeros((M, self.num_t, self.N)).to(self.device)
log_S = torch.zeros((M, self.num_t)).to(self.device)
log_Gammas = torch.zeros((M, self.num_t)).to(self.device)
sigma_xs = torch.zeros((M, self.num_t, self.N)).to(self.device)
times = torch.zeros((M, self.num_t)).to(self.device)
X[:,0,:] = x0
times[:,0] = t0
log_Gammas[:,0] = -1e5
for i in range(1, self.num_t):
dx, log_dS, log_Gamma, h, sigma_x = model.prior_sim(X[:, i-1, :], h, times[:,i-1], context, W)
x_tilde = X[:, i-1, :] + self.dt*dx + sigma_x*np.sqrt(self.dt)
X[:, i, :] = X[:, i-1, :] + self.dt*dx + torch.randn_like(X[:,i-1,:])*sigma_x*np.sqrt(self.dt) + 0.5*(model.sigma_x(x_tilde) - sigma_x)*(self.dt*torch.randn_like(X[:,i-1,:]).pow(2) - self.dt)/np.sqrt(self.dt)
log_S[:, i] = log_S[:, i-1] + self.dt*log_dS
log_Gammas[:,i] = log_Gamma.reshape(M)
times[:,i] = times[:,i-1] + self.dt
sigma_xs[:, i] = sigma_x
return times, X, log_S, log_Gammas, sigma_xs
| 39.16 | 220 | 0.516173 |
4a22a4fc272fb6d5599567477441abeddb155952 | 7,484 | py | Python | data/ms_dataset.py | han-liu/ModDropPlusPlus | 254e6d40a755cf1f00e544b6fcc0b3f587fd38e8 | [
"MIT"
] | 1 | 2022-03-23T03:35:39.000Z | 2022-03-23T03:35:39.000Z | data/ms_dataset.py | han-liu/ModDropPlusPlus | 254e6d40a755cf1f00e544b6fcc0b3f587fd38e8 | [
"MIT"
] | null | null | null | data/ms_dataset.py | han-liu/ModDropPlusPlus | 254e6d40a755cf1f00e544b6fcc0b3f587fd38e8 | [
"MIT"
] | null | null | null | import os.path
import random
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch
import cv2
import numpy as np
from data.base_dataset import BaseDataset
from configurations import *
import copy
from albumentations.augmentations.functional import grid_distortion
import matplotlib.pyplot as plt
def get_2d_paths(dir):
arrays = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if fname.endswith('.pkl'):
path = os.path.join(root, fname)
arrays.append(path)
return arrays
def augmentations(data, ratio, opt):
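    # Joint spatial augmentation applied to every modality and the mask together: random per-axis flips,
    # a random transpose, and an opt.trainSize crop whose location is biased towards non-empty mask regions.
    # The same flip/transpose/crop parameters are reused across modalities so they stay aligned.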
height, width = data['mask'].shape[:2] # height/y for first axis, width/x for second axis
for axis in [1, 0]:
if random.random() < 0.3:
for modality in MODALITIES + ['mask']:
data[modality] = np.flip(data[modality], axis).copy()
if random.random() < 0.5:
height, width = width, height
for modality in MODALITIES:
data[modality] = np.transpose(data[modality], (1, 0, 2))
data['mask'] = np.transpose(data['mask'], (1, 0))
need_resize = False
if random.random() < 0:
crop_size = random.randint(int(opt.trainSize / 1.5), min(height, width))
need_resize = True
else:
crop_size = opt.trainSize
mask = data['mask']
if np.sum(mask) == 0 or random.random() < 0.005:
x_min = random.randint(0, width - crop_size)
y_min = random.randint(0, height - crop_size)
else:
non_zero_yx = np.argwhere(mask)
y, x = random.choice(non_zero_yx)
x_min = x - random.randint(0, crop_size - 1)
y_min = y - random.randint(0, crop_size - 1)
x_min = np.clip(x_min, 0, width - crop_size)
y_min = np.clip(y_min, 0, height - crop_size)
for modality in MODALITIES + ['mask']:
interpolation = cv2.INTER_LINEAR
data[modality] = data[modality][y_min:y_min + crop_size, x_min:x_min + crop_size]
if need_resize:
            data[modality] = cv2.resize(data[modality], (opt.trainSize, opt.trainSize), interpolation=interpolation)  # pass interpolation as a keyword; the third positional argument of cv2.resize is dst
data['mask'] = (data['mask'] > 0.5).astype(np.float32)
return data
#=======================================================================================================================
# Code description
# This class is used for:
# (1) independent models (fixed combination of modalities)
# (2) ModDrop: regular modality dropout
# (3) ModDrop+: dynamic filter network ONLY (without intra-subject co-training)
"""
class MsDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.use_modality_dropout = opt.use_modality_dropout
self.all_paths = []
for dataset_name in DATASETS:
self.dir_data = os.path.join(opt.dataroot, dataset_name, opt.phase)
self.all_paths += sorted(get_2d_paths(self.dir_data))
def __getitem__(self, index):
path_this_sample = self.all_paths[index]
data_all_modalities = np.load(path_this_sample, allow_pickle=True)
# store the available modalities in a list
data_return = {'paths': path_this_sample}
available = []
for modality in MODALITIES:
if modality in data_all_modalities:
available.append(modality)
data_return[modality] = data_all_modalities[modality]
else:
data_return[modality] = np.zeros(data_all_modalities['t1'].shape)
data_return['mask'] = data_all_modalities['mask'][:, :, 0]
# augmentation
data_return = augmentations(data_return, data_all_modalities['ratio'], self.opt)
# preprocessing
for modality in available:
data_return[modality] = data_return[modality] / 2 - 1
data_return['mask'] = data_return['mask'] * 2 - 1
data_return['dc'] = data_all_modalities['dc']
data_return['mc'] = data_all_modalities['mc']
for modality in MODALITIES:
data_return[modality] = transforms.ToTensor()(data_return[modality]).float()
# ======== modality dropout ========
if self.use_modality_dropout:
mc_idx = list(np.where(data_return['mc'] == 1)[0])
zero_idx = random.sample(mc_idx, random.randint(0, len(mc_idx)-1))
for idx in zero_idx:
# image set as zero tensor
data_return[MODALITIES[idx]] = torch.zeros(data_return[MODALITIES[idx]].size())
data_return['mc'][idx] = 0 # modality code set as 0
return data_return
def __len__(self):
return len(self.all_paths)
def name(self):
return 'MsDataset'
"""
#=======================================================================================================================
# Code description:
# This class is used for ModDrop++: (1) dynamic filter network and (2) intra-subject co-training. This dataloader
# returns both (1) full-modality data and (2) missing modality data (randomly dropped) from the same subject.
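# A minimal consumption sketch (hypothetical option/loop names, shown only to illustrate the paired outputs;
# the actual training loop lives elsewhere in this repository):
#
#   dataset = MsDataset()
#   dataset.initialize(opt)                   # opt provides dataroot, phase, trainSize, use_modality_dropout
#   data_full, data_miss = dataset[0]         # same subject: all available modalities vs. randomly dropped ones
#   # intra-subject co-training can then supervise both passes, e.g.
#   #   loss = seg(net(data_miss), mask) + seg(net(data_full), mask) + consistency(net(data_miss), net(data_full))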
class MsDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
self.opt = opt
self.use_modality_dropout = opt.use_modality_dropout
self.all_paths = []
for dataset_name in DATASETS:
self.dir_data = os.path.join(opt.dataroot, dataset_name, opt.phase)
self.all_paths += sorted(get_2d_paths(self.dir_data))
def __getitem__(self, index):
path_this_sample = self.all_paths[index]
data_all_modalities = np.load(path_this_sample, allow_pickle=True)
# store the available modalities in a list
data_full = {'paths': path_this_sample}
available = []
for modality in MODALITIES:
if modality in data_all_modalities:
available.append(modality)
data_full[modality] = data_all_modalities[modality]
else:
data_full[modality] = np.zeros(data_all_modalities['t1'].shape)
data_full['mask'] = data_all_modalities['mask'][:, :, 0]
# augmentation
data_full = augmentations(data_full, data_all_modalities['ratio'], self.opt)
# preprocessing
for modality in available:
data_full[modality] = data_full[modality] / 2 - 1
data_full['mask'] = data_full['mask'] * 2 - 1
data_full['dc'] = data_all_modalities['dc']
data_full['mc'] = data_all_modalities['mc']
for modality in MODALITIES:
data_full[modality] = transforms.ToTensor()(data_full[modality]).float()
data_miss = copy.deepcopy(data_full)
# === modality dropout ===
if self.use_modality_dropout:
mc_idx = list(np.where(data_miss['mc'] == 1)[0])
zero_idx = random.sample(mc_idx, random.randint(0, len(mc_idx) - 1))
for idx in zero_idx:
data_miss[MODALITIES[idx]] = torch.zeros(data_miss[MODALITIES[idx]].size()) # image set as zero tensor
data_miss['mc'][idx] = 0 # modality code set as 0
return data_full, data_miss
def __len__(self):
return len(self.all_paths)
def name(self):
return 'MsDataset' | 37.42 | 120 | 0.608231 |
4a22a5e9a16c8bb2b98d97342c4e81cf994a900f | 9,771 | py | Python | tests/conftest.py | jasadams/aws-data-wrangler | 36aad847d48ebeafc6639a82a2a5107892dfcc06 | [
"Apache-2.0"
] | 1 | 2021-04-13T06:51:54.000Z | 2021-04-13T06:51:54.000Z | tests/conftest.py | timmylicheng/aws-data-wrangler | 6ae62354a82f41d38b1a20da3ab3f0baf0fe436d | [
"Apache-2.0"
] | 63 | 2021-05-31T08:35:17.000Z | 2022-03-28T08:12:04.000Z | tests/conftest.py | Thiago-Dantas/aws-data-wrangler | b13fcd8d169feb3219b4b4fff025dc6089cfe03b | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import boto3 # type: ignore
import pytest # type: ignore
import awswrangler as wr
from ._utils import create_workgroup, extract_cloudformation_outputs, get_time_str_with_random_suffix, path_generator
@pytest.fixture(scope="session")
def cloudformation_outputs():
return extract_cloudformation_outputs()
@pytest.fixture(scope="session")
def region(cloudformation_outputs):
return cloudformation_outputs["Region"]
@pytest.fixture(scope="session")
def bucket(cloudformation_outputs):
return cloudformation_outputs["BucketName"]
@pytest.fixture(scope="session")
def glue_database(cloudformation_outputs):
return cloudformation_outputs["GlueDatabaseName"]
@pytest.fixture(scope="session")
def kms_key(cloudformation_outputs):
return cloudformation_outputs["KmsKeyArn"]
@pytest.fixture(scope="session")
def kms_key_id(kms_key):
return kms_key.split("/", 1)[1]
@pytest.fixture(scope="session")
def loggroup(cloudformation_outputs):
loggroup_name = cloudformation_outputs["LogGroupName"]
logstream_name = cloudformation_outputs["LogStream"]
client = boto3.client("logs")
response = client.describe_log_streams(logGroupName=loggroup_name, logStreamNamePrefix=logstream_name)
token = response["logStreams"][0].get("uploadSequenceToken")
events = []
for i in range(5):
events.append({"timestamp": int(1000 * datetime.now().timestamp()), "message": str(i)})
args = {"logGroupName": loggroup_name, "logStreamName": logstream_name, "logEvents": events}
if token:
args["sequenceToken"] = token
try:
client.put_log_events(**args)
except client.exceptions.DataAlreadyAcceptedException:
pass # Concurrency
while True:
results = wr.cloudwatch.run_query(log_group_names=[loggroup_name], query="fields @timestamp | limit 5")
if len(results) >= 5:
break
yield loggroup_name
@pytest.fixture(scope="session")
def workgroup0(bucket):
return create_workgroup(
wkg_name="aws_data_wrangler_0",
config={
"ResultConfiguration": {"OutputLocation": f"s3://{bucket}/athena_workgroup0/"},
"EnforceWorkGroupConfiguration": True,
"PublishCloudWatchMetricsEnabled": True,
"BytesScannedCutoffPerQuery": 100_000_000,
"RequesterPaysEnabled": False,
},
)
@pytest.fixture(scope="session")
def workgroup1(bucket):
return create_workgroup(
wkg_name="aws_data_wrangler_1",
config={
"ResultConfiguration": {
"OutputLocation": f"s3://{bucket}/athena_workgroup1/",
"EncryptionConfiguration": {"EncryptionOption": "SSE_S3"},
},
"EnforceWorkGroupConfiguration": True,
"PublishCloudWatchMetricsEnabled": True,
"BytesScannedCutoffPerQuery": 100_000_000,
"RequesterPaysEnabled": False,
},
)
@pytest.fixture(scope="session")
def workgroup2(bucket, kms_key):
return create_workgroup(
wkg_name="aws_data_wrangler_2",
config={
"ResultConfiguration": {
"OutputLocation": f"s3://{bucket}/athena_workgroup2/",
"EncryptionConfiguration": {"EncryptionOption": "SSE_KMS", "KmsKey": kms_key},
},
"EnforceWorkGroupConfiguration": False,
"PublishCloudWatchMetricsEnabled": True,
"BytesScannedCutoffPerQuery": 100_000_000,
"RequesterPaysEnabled": False,
},
)
@pytest.fixture(scope="session")
def workgroup3(bucket, kms_key):
return create_workgroup(
wkg_name="aws_data_wrangler_3",
config={
"ResultConfiguration": {
"OutputLocation": f"s3://{bucket}/athena_workgroup3/",
"EncryptionConfiguration": {"EncryptionOption": "SSE_KMS", "KmsKey": kms_key},
},
"EnforceWorkGroupConfiguration": True,
"PublishCloudWatchMetricsEnabled": True,
"BytesScannedCutoffPerQuery": 100_000_000,
"RequesterPaysEnabled": False,
},
)
@pytest.fixture(scope="session")
def databases_parameters(cloudformation_outputs):
parameters = dict(postgresql={}, mysql={}, redshift={}, sqlserver={})
parameters["postgresql"]["host"] = cloudformation_outputs["PostgresqlAddress"]
parameters["postgresql"]["port"] = 3306
parameters["postgresql"]["schema"] = "public"
parameters["postgresql"]["database"] = "postgres"
parameters["mysql"]["host"] = cloudformation_outputs["MysqlAddress"]
parameters["mysql"]["port"] = 3306
parameters["mysql"]["schema"] = "test"
parameters["mysql"]["database"] = "test"
parameters["redshift"]["host"] = cloudformation_outputs["RedshiftAddress"]
parameters["redshift"]["port"] = cloudformation_outputs["RedshiftPort"]
parameters["redshift"]["identifier"] = cloudformation_outputs["RedshiftIdentifier"]
parameters["redshift"]["schema"] = "public"
parameters["redshift"]["database"] = "test"
parameters["redshift"]["role"] = cloudformation_outputs["RedshiftRole"]
parameters["password"] = cloudformation_outputs["DatabasesPassword"]
parameters["user"] = "test"
parameters["sqlserver"]["host"] = cloudformation_outputs["SqlServerAddress"]
parameters["sqlserver"]["port"] = 1433
parameters["sqlserver"]["schema"] = "dbo"
parameters["sqlserver"]["database"] = "test"
return parameters
@pytest.fixture(scope="session")
def redshift_external_schema(cloudformation_outputs, databases_parameters, glue_database):
region = cloudformation_outputs.get("Region")
sql = f"""
CREATE EXTERNAL SCHEMA IF NOT EXISTS aws_data_wrangler_external FROM data catalog
DATABASE '{glue_database}'
IAM_ROLE '{databases_parameters["redshift"]["role"]}'
REGION '{region}';
"""
con = wr.redshift.connect(connection="aws-data-wrangler-redshift")
with con.cursor() as cursor:
cursor.execute(sql)
con.commit()
con.close()
return "aws_data_wrangler_external"
@pytest.fixture(scope="session")
def account_id():
return boto3.client("sts").get_caller_identity().get("Account")
@pytest.fixture(scope="function")
def glue_ctas_database():
name = f"db_{get_time_str_with_random_suffix()}"
print(f"Database name: {name}")
wr.catalog.create_database(name=name)
yield name
wr.catalog.delete_database(name=name)
print(f"Database {name} deleted.")
@pytest.fixture(scope="function")
def glue_table(glue_database: str) -> None:
name = f"tbl_{get_time_str_with_random_suffix()}"
print(f"Table name: {name}")
wr.catalog.delete_table_if_exists(database=glue_database, table=name)
yield name
wr.catalog.delete_table_if_exists(database=glue_database, table=name)
print(f"Table {glue_database}.{name} deleted.")
@pytest.fixture(scope="function")
def glue_table2(glue_database):
name = f"tbl_{get_time_str_with_random_suffix()}"
print(f"Table name: {name}")
wr.catalog.delete_table_if_exists(database=glue_database, table=name)
yield name
wr.catalog.delete_table_if_exists(database=glue_database, table=name)
@pytest.fixture(scope="function")
def path(bucket):
yield from path_generator(bucket)
@pytest.fixture(scope="function")
def path2(bucket):
yield from path_generator(bucket)
@pytest.fixture(scope="function")
def path3(bucket):
yield from path_generator(bucket)
@pytest.fixture(scope="function")
def redshift_table():
name = f"tbl_{get_time_str_with_random_suffix()}"
print(f"Table name: {name}")
yield name
con = wr.redshift.connect("aws-data-wrangler-redshift")
with con.cursor() as cursor:
cursor.execute(f"DROP TABLE IF EXISTS public.{name}")
con.commit()
con.close()
@pytest.fixture(scope="function")
def postgresql_table():
name = f"tbl_{get_time_str_with_random_suffix()}"
print(f"Table name: {name}")
yield name
con = wr.postgresql.connect("aws-data-wrangler-postgresql")
with con.cursor() as cursor:
cursor.execute(f"DROP TABLE IF EXISTS public.{name}")
con.commit()
con.close()
@pytest.fixture(scope="function")
def mysql_table():
name = f"tbl_{get_time_str_with_random_suffix()}"
print(f"Table name: {name}")
yield name
con = wr.mysql.connect("aws-data-wrangler-mysql")
with con.cursor() as cursor:
cursor.execute(f"DROP TABLE IF EXISTS test.{name}")
con.commit()
con.close()
@pytest.fixture(scope="function")
def sqlserver_table():
name = f"tbl_{get_time_str_with_random_suffix()}"
print(f"Table name: {name}")
yield name
con = wr.sqlserver.connect("aws-data-wrangler-sqlserver")
with con.cursor() as cursor:
cursor.execute(f"IF OBJECT_ID(N'dbo.{name}', N'U') IS NOT NULL DROP TABLE dbo.{name}")
con.commit()
con.close()
@pytest.fixture(scope="function")
def timestream_database_and_table():
name = f"tbl_{get_time_str_with_random_suffix()}"
print(f"Timestream name: {name}")
wr.timestream.create_database(name)
wr.timestream.create_table(name, name, 1, 1)
yield name
wr.timestream.delete_table(name, name)
wr.timestream.delete_database(name)
@pytest.fixture(scope="function")
def compare_filename_prefix():
def assert_filename_prefix(filename, filename_prefix, test_prefix):
if filename_prefix:
assert filename.startswith(test_prefix)
else:
assert not filename.startswith(test_prefix)
return assert_filename_prefix
@pytest.fixture(scope="function")
def random_glue_database():
database_name = get_time_str_with_random_suffix()
yield database_name
wr.catalog.delete_database(database_name)
| 32.89899 | 117 | 0.691331 |
4a22a76ff0108c8ce615dc4bdbab179b07f6469a | 10,463 | py | Python | trainrecs.py | cielsys/CarNDProj4_CarBehave | 996802cdad6997ce31757e2b7f000e6522446365 | [
"MIT"
] | null | null | null | trainrecs.py | cielsys/CarNDProj4_CarBehave | 996802cdad6997ce31757e2b7f000e6522446365 | [
"MIT"
] | null | null | null | trainrecs.py | cielsys/CarNDProj4_CarBehave | 996802cdad6997ce31757e2b7f000e6522446365 | [
"MIT"
] | null | null | null | import os
import errno
import csv
import random
import math
import datetime
import pickle
import threading
import cv2
import numpy as np
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#====================== CTrainingRecord() =====================
class CTrainingRecord():
'''
    Convenience class for holding all of the necessary info
for a single image.
'''
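    # Example construction (hypothetical file name):
    #   CTrainingRecord("IMG/center_2019_01_01_12_00_00_000.jpg", steeringAngle=0.05, whichCam="center", doMirror=False)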
# ----------------------- ctor
def __init__(self, fileName, steeringAngle, whichCam, doMirror):
self.fileName = fileName
self.steeringAngle = steeringAngle
self.whichCam = whichCam
self.doMirror = doMirror
# --------------------------------- GetImage()
def GetImage(self):
'''
Read the corresponding image into standard numpy
        HxWxRGB array. Mirror the image if specified.
:return:
'''
img = mpimg.imread(self.fileName)
if self.doMirror:
img = np.fliplr(img)
return(img)
# --------------------------------- GetImageShape()
def GetImageShape(self):
img = mpimg.imread(self.fileName)
        shape = img.shape  # shape is a numpy attribute, not a callable
return(shape)
# --------------------------------- PlotImage
def PlotImage(self, figtitle=None):
if figtitle is None:
figtitle = "{}".format(self.fileName)
img = self.GetImage()
axTitle = "cam={} mir={} steer={} shape={}".format(self.whichCam, self.doMirror, self.steeringAngle, img.shape)
PlotImage(img, figtitle, axTitle)
# --------------------------------- PlotImage
def PlotImage(img, figtitle, axTitle):
figsize = (8, 4)
fig = plt.figure(figsize=figsize)
fig.suptitle(figtitle, fontsize=10)
ax = plt.gca()
ax.imshow(img, interpolation='sinc')
ax.set_title(axTitle, fontsize=8)
plt.tight_layout(pad=2)
plt.show()
#--------------------------------- SimpleMultiImage
def SimpleMultiImage(imgInList, figtitle="TestImg"):
figsize = (9, 3)
plotNumCols= len(imgInList)
plotNumRows = 1
fig, axes = plt.subplots(plotNumRows, plotNumCols, figsize=figsize, )#subplot_kw={'xticks': [], 'yticks': []})
fig.suptitle(figtitle, fontsize=16)
for (imageIndex, (ax, imgOut)) in enumerate(zip(axes.flat, imgInList)):
#imgOut = imgInList[imageIndex]
title = "img[{}]".format(imageIndex)
#ax.set_ylabel(ylabel, fontsize=9)
ax.set_title(title, fontsize=12)
ax.imshow(imgOut, interpolation='sinc') #dsIn.X[imageIndex]
plt.tight_layout() # (pad=-0.75 w_pad=0.5, h_pad=1.0)
plt.show()
#--------------------------------- ReadCSVFile()
def CSVRowRawToFields(csvRowRaw):
(fnameCenter, fnameLeft, fnameRight, steeringStr, throttleStr, brakeStr, speedStr) = csvRowRaw
return (fnameCenter, fnameLeft, fnameRight, steeringStr, throttleStr, brakeStr, speedStr)
#--------------------------------- ReadCSVFile()
def ReadCSVFile(csvFileNameIn, limitSize=0):
'''
Read each row of the image meta data CSV and fixup the filenames.
This is because the filenames, in older versions of the recorder,
included full path info that was system dependent.
    This function assumes that the file basenames can be found
in a directory named "IMG" residing in the same folder as
the CSV file.
:param csvFileNameIn:
:param limitSize: Use 0 for normal operation. Positive values are used for dev/debug only
:return: A list of filepath fixed CSV row records
'''
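    # Example of the fixup (hypothetical recorder path): a raw CSV cell such as
    #   C:\Users\driver\Desktop\IMG\center_2019_01_01_12_00_00_000.jpg
    # becomes <csv directory>/IMG/center_2019_01_01_12_00_00_000.jpg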
print("Reading CSV file: '{}'... ".format(csvFileNameIn), end='', flush=True)
csvFilePath = os.path.dirname(csvFileNameIn)
imageDir = csvFilePath + "/IMG/"
csvRowsRaw = []
numRows = 0
with open(csvFileNameIn, mode='r') as infile:
infile.readline() # Skip header line 0
reader = csv.reader(infile)
for csvRowRaw in csv.reader(infile):
(fnameCenter, fnameLeft, fnameRight, steeringStr, throttleStr, brakeStr, speedStr) = CSVRowRawToFields(csvRowRaw)
csvRowRaw[0] = imageDir + os.path.basename(fnameCenter)
csvRowRaw[1] = imageDir + os.path.basename(fnameLeft)
csvRowRaw[2] = imageDir + os.path.basename(fnameRight)
csvRowsRaw.append(csvRowRaw)
numRows+=1
if (limitSize > 0) and (limitSize == numRows):
print("LIMITING INPUT to {} rows. ".format(limitSize), end='')
break
print("Done reading {} rows".format(numRows))
return (csvRowsRaw)
#--------------------------------- CSVRawRowsToTrainingRecs()
def CSVRawRowsToTrainingRecs(csvRowsRaw, camAngleCorrection):
'''
Read each CSV row record and demultiplex/create
an independent CTrainingRecord for each of the 3 camera images.
Side camera images adjust the steering angle by camAngleCorrection
:param csvRowsRaw:
:param camAngleCorrection:
:return: 2 Lists of CTrainingRecords, one of Center cam and one of Side cams
'''
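    # Example (hypothetical row): steering 0.10 with camAngleCorrection 0.2 yields three records with
    # angles 0.10 (center), 0.30 (left) and -0.10 (right), all with doMirror=False.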
trainRecsCenter = []
trainRecsSides = []
for csvRowRaw in csvRowsRaw:
(fnameCenter, fnameLeft, fnameRight, steeringStr, throttleStr, brakeStr, speedStr) = CSVRowRawToFields(csvRowRaw)
steeringVal = float(steeringStr)
trainRecsCenter.append(CTrainingRecord(fnameCenter, steeringVal, whichCam="center", doMirror=False))
trainRecsSides.append(CTrainingRecord(fnameLeft, steeringVal + camAngleCorrection, whichCam="left", doMirror=False))
trainRecsSides.append(CTrainingRecord(fnameRight, steeringVal - camAngleCorrection, whichCam="right", doMirror=False))
return(trainRecsSides, trainRecsCenter)
#--------------------------------- RecordToString()
def RecordToString(rec):
'''
Pretty print formatting of a single training Record.
:param rec:
:return:
'''
recStr = "fn={:>75}, steer= {:+0.2f}, cam= {:>6}, doMirror= {}".format(rec.fileName,rec.steeringAngle, rec.whichCam, rec.doMirror)
return(recStr)
#--------------------------------- PrintRecords()
def PrintRecords(trainRecs, numShow=0):
'''
Dev debug utility for inspecting lists of trainRecs.
:param trainRecs:
:param numShow: Number of recs to show, or 0 for all
'''
numShow = len(trainRecs) if (numShow <= 0 or numShow > len(trainRecs)) else numShow
print("Showing {} of {} trainingRecords".format(numShow, len(trainRecs)))
for recIndex, rec in enumerate(trainRecs):
recStr = RecordToString(rec)
print("recs[{:6}]: {}".format(recIndex, recStr))
if (numShow == recIndex+1):
break
print()
#--------------------------------- GeneratorThreadWrapper()
def GeneratorThreadWrapper(gen):
'''
Never used. This was to allow model.fit_generator() multi workers
to improve CPU utilization during training.
:param gen:
:return:
'''
lock = threading.Lock()
while True:
try:
with lock:
x, y = next(gen)
except StopIteration:
return
yield x, y
#--------------------------------- TrainRecordBatchGenerator()
# This code is for experimental HSV conversion
#doConvertToHSV = False
#if doConvertToHSV:
# imgCur = matplotlib.colors.rgb_to_hsv(imgCur)
def TrainRecordBatchGenerator(trainRecs, batchSize, cropTBLR=None):
'''
This generator supplies batches of X,y training images
to keras.model.fit_generator() in model.py::Main()
:param trainRecs: A list of training records to extract X,y batches from
:param batchSize: Number of X,y values to provide per next()
:param cropTBLR: X Image cropping spec
:return: 2 batchSize Lists of X images, y steering angles
'''
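    # Minimal usage sketch (hypothetical names), matching how model.fit_generator() consumes this generator:
    #   gen = TrainRecordBatchGenerator(trainRecsTrain, batchSize=32, cropTBLR=(60, 10, 0, 0))
    #   model.fit_generator(gen, steps_per_epoch=math.ceil(len(trainRecsTrain) / 32), epochs=5)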
numRecs = len(trainRecs)
while True: # Never runs out - recycles the training recs if needed
np.random.shuffle(trainRecs)
for offset in range(0, numRecs, batchSize):
trainRecsBatch = trainRecs[offset : offset + batchSize]
XBatch = []
yBatch = []
for batchItemIndex, trainRecCur in enumerate(trainRecsBatch):
imgCur = trainRecCur.GetImage() # Takes care of mirror internally, if needed
# Crop the original image to Region of Interest
if cropTBLR is not None:
imgCur = imgCur[cropTBLR[0]: - cropTBLR[1], :, :]
XBatch.append(imgCur)
yBatch.append(trainRecCur.steeringAngle)
XBatch = np.array(XBatch)
yBatch = np.array(yBatch)
yield (XBatch, yBatch)
#====================== Main() =====================
def DevDebugMain(csvFileNameIn):
'''
This Main() is for dev debug only. This file is not normally called directly.
:param csvFileNameIn:
:return:
'''
#dir = "/home/cl/AAAProjects/AAAUdacity/carND/Proj4_CarBehave/Proj4Root/Assets/writeupImages/"
#left=mpimg.imread(dir + "rawLeft.png")
#center=mpimg.imread(dir + "rawCenter.png")
#right=mpimg.imread(dir + "rawRight.png")
#SimpleMultiImage([left, center, right], figtitle="Raw Left, Center, Right")
limitSize = 0
camAngleCorrection = 0.2
csvRowsRaw = ReadCSVFile(csvFileNameIn, limitSize=limitSize)
csvRowsRaw = csvRowsRaw[7833:7834]
trainRecsSides, trainRecsCenter = CSVRawRowsToTrainingRecs(csvRowsRaw, camAngleCorrection=camAngleCorrection)
PrintRecords(trainRecsSides, numShow=10)
trainRecsCenterMirror = [CTrainingRecord(rec.fileName, -rec.steeringAngle, rec.whichCam, not rec.doMirror) for rec in trainRecsCenter]
trainRecsFull = trainRecsCenter + trainRecsCenterMirror
#PrintRecords(trainRecsFull, numShow=10)
#np.random.shuffle(trainRecsFull)
PrintRecords(trainRecsFull, numShow=10)
#trainRecsSides[0].PlotImage()
#trainRecsCenter[0].PlotImage()
#trainRecsSides[1].PlotImage()
imgCenterNoMirror = trainRecsFull[0].GetImage()
imgCenterMirror = trainRecsFull[1].GetImage()
cropTBLR = (60, 10, 0, 0)
imgCenterNoMirror = imgCenterNoMirror[cropTBLR[0]: - cropTBLR[1], :, :]
imgCenterMirror = imgCenterMirror[cropTBLR[0]: - cropTBLR[1], :, :]
SimpleMultiImage([imgCenterNoMirror, imgCenterMirror], figtitle="Center, NoMirror and Mirror. Cropped")
#====================== Main Invocation =====================
if __name__ == '__main__':
trainingFilesDirIn = "./Assets/trainingdata/"
csvFileNameIn = trainingFilesDirIn + "driving_log.csv"
    DevDebugMain(csvFileNameIn)
| 36.583916 | 138 | 0.636051 |
4a22aa54f2a48a2877243922b184f255cfffb079 | 593 | py | Python | scripts/bump_alpha.py | JarbasAl/jarbas-wake-word-plugin-snowboy | 09b9dea50ec2c40df37f5e4d0ed2faa1eef304cd | ["Apache-2.0"] | null | null | null | scripts/bump_alpha.py | JarbasAl/jarbas-wake-word-plugin-snowboy | 09b9dea50ec2c40df37f5e4d0ed2faa1eef304cd | ["Apache-2.0"] | null | null | null | scripts/bump_alpha.py | JarbasAl/jarbas-wake-word-plugin-snowboy | 09b9dea50ec2c40df37f5e4d0ed2faa1eef304cd | ["Apache-2.0"] | null | null | null |
import fileinput
from os.path import join, dirname
version_file = join(dirname(dirname(__file__)), "ovos_ww_plugin_snowboy", "version.py")
version_var_name = "VERSION_ALPHA"
with open(version_file, "r", encoding="utf-8") as v:
for line in v.readlines():
if line.startswith(version_var_name):
version = int(line.split("=")[-1])
new_version = int(version) + 1
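# fileinput with inplace=True redirects print() back into the version file,
# rewriting the VERSION_ALPHA line with the incremented value.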
for line in fileinput.input(version_file, inplace=True):
if line.startswith(version_var_name):
print(f"{version_var_name} = {new_version}")
else:
print(line.rstrip('\n'))
| 31.210526 | 87 | 0.674536 |
4a22abb5369728a9de3e212c9131384739ec7dde | 2,053 | py | Python | tests/functional/fkey/primary/test_insert_pk_16.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | ["MIT"] | null | null | null | tests/functional/fkey/primary/test_insert_pk_16.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | ["MIT"] | null | null | null | tests/functional/fkey/primary/test_insert_pk_16.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | ["MIT"] | null | null | null |
#coding:utf-8
#
# id: functional.fkey.primary.insert_pk_16
# title: Check correct work fix with foreign key
# decription: Check foreign key work.
# Master transaction inserts record into master_table and commit.
# Detail transaction inserts record in detail_table.
# Expected: no errors.
# tracker_id:
# min_versions: []
# versions: 2.1
# qmid: functional.fkey.primary.ins_16
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1
# resources: None
substitutions_1 = []
init_script_1 = """CREATE TABLE MASTER_TABLE (
ID INTEGER PRIMARY KEY,
INT_F INTEGER
);
CREATE TABLE DETAIL_TABLE (
ID INTEGER PRIMARY KEY,
FKEY INTEGER
);
ALTER TABLE DETAIL_TABLE ADD CONSTRAINT FK_DETAIL_TABLE FOREIGN KEY (FKEY) REFERENCES MASTER_TABLE (ID);
COMMIT;"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# TPB_master = (
# chr(kdb.isc_tpb_write)
# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version)
# + chr(kdb.isc_tpb_nowait)
# )
# TPB_detail = (
# chr(kdb.isc_tpb_write)
# + chr(kdb.isc_tpb_read_committed) + chr(kdb.isc_tpb_rec_version)
# + chr(kdb.isc_tpb_nowait)
# )
# db_conn.begin(tpb=TPB_master)
# cm_1 = db_conn.cursor()
# cm_1.execute('INSERT INTO MASTER_TABLE (ID, INT_F) VALUES (1, 10)')
# db_conn.commit()
#
# #Create second connection for change detail table
# con_detail = kdb.connect(
# dsn=dsn.encode(),
# user=user_name.encode(),
# password=user_password.encode()
# )
#
# try:
# con_detail.begin(tpb=TPB_detail)
# cd = con_detail.cursor()
# cd.execute("INSERT INTO DETAIL_TABLE (ID, FKEY) VALUES (1,1)")
# con_detail.commit()
# except Exception, e:
# print (e[0])
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.1')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| 26.320513 | 104 | 0.657087 |
4a22ac989a0fa8847d054cc298629b531867d9ba | 2,037 | py | Python | mozillians/announcements/models.py | mozilla/vouched-mozillians | 88fca9aea0ab1e173cbc33776aa388b956859559 | ["BSD-3-Clause"] | 1 | 2020-10-27T12:17:34.000Z | 2020-10-27T12:17:34.000Z | mozillians/announcements/models.py | akatsoulas/vouched-mozillians | 6dcfaf61518ff038403b2b3e06ad9b813135b287 | ["BSD-3-Clause"] | 5 | 2020-09-28T19:04:19.000Z | 2020-10-27T19:48:31.000Z | mozillians/announcements/models.py | akatsoulas/vouched-mozillians | 6dcfaf61518ff038403b2b3e06ad9b813135b287 | ["BSD-3-Clause"] | 2 | 2020-09-22T08:55:10.000Z | 2020-09-24T10:40:58.000Z |
import os
import uuid
from jinja2 import Markup
import bleach
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.timezone import now
from mozillians.announcements.managers import AnnouncementManager
from sorl.thumbnail import ImageField
ALLOWED_TAGS = ["em", "strong", "a", "u"]
def _calculate_image_filename(instance, filename):
"""Generate a unique filename for uploaded image."""
return os.path.join(settings.ANNOUNCEMENTS_PHOTO_DIR, str(uuid.uuid4()) + ".jpg")
class Announcement(models.Model):
objects = AnnouncementManager()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=255)
text = models.TextField(max_length=750)
image = ImageField(
default="",
blank=True,
help_text=(
"60x60 pixel image recommended. Image "
"will be rescaled automatically to "
"a square."
),
upload_to=_calculate_image_filename,
)
publish_from = models.DateTimeField(help_text="Timezone is %s" % settings.TIME_ZONE)
publish_until = models.DateTimeField(
blank=True, null=True, help_text="Timezone is %s" % settings.TIME_ZONE
)
def clean(self):
self.text = bleach.clean(self.text, tags=ALLOWED_TAGS, strip=True)
if self.publish_until and self.publish_until < self.publish_from:
raise ValidationError("Publish until must come after publish from.")
@property
def published(self):
_now = now()
return (self.publish_from <= _now) and (
self.publish_until > _now if self.publish_until else True
)
def get_template_text(self):
"""Mark text as template safe so html tags are not escaped."""
return Markup(self.text)
def __unicode__(self):
return self.title
class Meta:
ordering = ["-publish_from"]
get_latest_by = "publish_from"
| 31.338462 | 88 | 0.68434 |
4a22ace72ad88db6e6ec1e05466e8f9ae85598c4 | 13,304 | py | Python | models.py | antonyvigouret/Text-Orientation-Detection | 85ead7573a199f80176653035adaad6208713ad7 | ["MIT"] | 1 | 2020-08-01T21:19:37.000Z | 2020-08-01T21:19:37.000Z | models.py | antonyvigouret/Text-Recognition | 85ead7573a199f80176653035adaad6208713ad7 | ["MIT"] | null | null | null | models.py | antonyvigouret/Text-Recognition | 85ead7573a199f80176653035adaad6208713ad7 | ["MIT"] | null | null | null |
import tensorflow as tf
from tensorflow.keras.layers import (
Dense,
Conv2D,
Bidirectional,
LSTM,
MaxPooling2D,
BatchNormalization,
Activation,
Input,
Lambda,
Add,
)
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from utils import ALPHABET
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
# the 2 is critical here since the first couple outputs of the RNN
# tend to be garbage:
y_pred = y_pred[:, 2:, :]
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
### Subclass implementation of the CRNN model
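# Architecture sketch: a VGG-style convolutional stack squeezes the image height
# down to 1, the width-wise feature sequence feeds two bidirectional LSTMs, and a
# softmax over the alphabet (+1 CTC blank class) is trained with CTC loss.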
class CRNN(tf.keras.Model):
def __init__(self, alphabet, input_shape=(32, None, 3)):
super(CRNN, self).__init__(name = "CRNN")
self.alphabet = alphabet
self.input_layer = Input(input_shape)
self.conv1 = Conv2D(64, 3, padding="same", activation="relu", name="conv2d_1")
self.max_pool1 = MaxPooling2D((2, 2), (2, 2), name="pool2d_1")
self.conv2 = Conv2D(128, 3, padding="same", activation="relu", name="conv2d_2")
self.max_pool2 = MaxPooling2D((2, 2), (2, 2), name="pool2d_2")
self.conv3 = Conv2D(256, 3, padding="same", activation="relu", name="conv2d_3")
self.conv4 = Conv2D(256, 3, padding="same", activation="relu", name="conv2d_4")
self.max_pool4 = MaxPooling2D((2, 1), (2, 1), name="pool2d_4")
self.conv5 = Conv2D(512, 3, padding="same", activation="relu", name="conv2d_5")
self.batch_norm5 = BatchNormalization(name="batch_norm_5")
self.conv6 = Conv2D(512, 3, padding="same", activation="relu", name="conv2d_6")
self.batch_norm6 = BatchNormalization(name="batch_norm_6")
self.max_pool6 = MaxPooling2D((2, 1), (2, 1), name="pool2d_6")
self.conv7 = Conv2D(512, 2, padding="valid", activation="relu", name="conv2d_7")
self.bidiLSTM1 = Bidirectional(LSTM(256, return_sequences=True), name="bidirectional_1")
self.bidiLSTM2 = Bidirectional(LSTM(256, return_sequences=True), name="bidirectional_2")
self.dense = Dense(len(self.alphabet) + 1)
self.out = self.call(self.input_layer, training=False)
super(CRNN, self).__init__(
inputs=self.input_layer,
outputs=self.out)
def call(self, inputs, training=True):
        # [?, 32, W, 3] -> [?, 32, W, 64] -> [?, 16, W/2, 64]
x = self.conv1(inputs)
x = self.max_pool1(x)
        # [?, 16, W/2, 64] -> [?, 16, W/2, 128] -> [?, 8, W/4, 128]
x = self.conv2(x)
x = self.max_pool2(x)
#[?, 8, W/4, 128] -> [?, 8, W/4, 256]
x = self.conv3(x)
        # [?, 8, W/4, 256] -> [?, 8, W/4, 256] -> [?, 4, W/4, 256]
x = self.conv4(x)
x = self.max_pool4(x)
        # [?, 4, W/4, 256] -> [?, 4, W/4, 512]
x = self.conv5(x)
x = self.batch_norm5(x)
#[?, 4, W/4, 512] -> [?, 4, W/4, 512] -> [?, 2, W/4, 512]
x = self.conv6(x)
x = self.batch_norm6(x)
x = self.max_pool6(x)
        # [?, 2, W/4, 512] -> [?, 1, W/4-1, 512] (2x2 valid conv collapses the height)
x = self.conv7(x)
x = tf.squeeze(x, axis=1)
# [batch, width_seq, depth_chanel]
x = self.bidiLSTM1(x)
x = self.bidiLSTM2(x)
logits = self.dense(x)
y_pred = Activation("softmax", name="softmax")(logits)
return y_pred
def train_step(self, data):
x, y = data
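        # x is a dict carrying "the_input", "the_labels", "input_length" and
        # "label_length", all of which are needed to evaluate the CTC loss below.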
with tf.GradientTape() as tape:
y_pred = self(x["the_input"], training=True) # Forward pass
y_pred = y_pred[:, 2:, :]
loss = tf.reduce_mean(ctc_lambda_func((y_pred, x["the_labels"], tf.reshape(x["input_length"], [-1, 1]), tf.reshape(x["label_length"], [-1, 1]))))
print(loss)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
def build(self):
# Initialize the graph
self._is_graph_network = True
self._init_graph_network(
inputs=self.input_layer,
outputs=self.out
)
def get_CRNN(weights=None):
input_data = Input(name="the_input", shape=(32, None, 3), dtype="float32")
inner = Conv2D(32, 3, padding="same", kernel_initializer="he_normal", name="conv1")(
input_data
)
inner = BatchNormalization()(inner)
inner = Activation("relu")(inner)
inner = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="max1")(inner)
inner = Conv2D(64, 3, padding="same", kernel_initializer="he_normal", name="conv2")(
inner
)
inner = BatchNormalization()(inner)
inner = Activation("relu")(inner)
inner = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="max2")(inner)
inner = Conv2D(
128, 3, padding="same", kernel_initializer="he_normal", name="conv3"
)(inner)
inner = BatchNormalization()(inner)
inner = Activation("relu")(inner)
inner = MaxPooling2D(pool_size=(2, 1), strides=(2, 1), name="max3")(inner)
inner = Conv2D(
256, 3, padding="same", kernel_initializer="he_normal", name="conv4"
)(inner)
inner = BatchNormalization()(inner)
inner = Activation("relu")(inner)
inner = MaxPooling2D(pool_size=(2, 1), strides=(2, 1), name="max4")(inner)
inner = Conv2D(
256, 3, padding="same", kernel_initializer="he_normal", name="conv5"
)(inner)
inner = BatchNormalization()(inner)
inner = Activation("relu")(inner)
inner = MaxPooling2D(pool_size=(2, 1), strides=(2, 1), name="max5")(inner)
inner = tf.squeeze(inner, axis=1)
# stack of 3 bidi lstm
inner = Bidirectional(LSTM(256, return_sequences=True))(inner)
inner = Bidirectional(LSTM(256, return_sequences=True))(inner)
inner = Bidirectional(LSTM(512, return_sequences=True))(inner)
# transforms RNN output to character activations:
alphabet_size = 107
inner = Dense(alphabet_size, kernel_initializer="he_normal", name="dense2")(inner)
y_pred = Activation("softmax", name="softmax")(inner)
Model(inputs=input_data, outputs=y_pred).summary()
labels = Input(name="the_labels", shape=[None], dtype="float32")
input_length = Input(name="input_length", shape=[1], dtype="int64")
label_length = Input(name="label_length", shape=[1], dtype="int64")
# Keras doesn't currently support loss funcs with extra parameters
# so CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")(
[y_pred, labels, input_length, label_length]
)
# clipnorm seems to speeds up convergence
optimizer = Adam(learning_rate=0.001, decay=1e-6)
model = Model(
inputs=[input_data, labels, input_length, label_length], outputs=loss_out
)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={"ctc": lambda y_true, y_pred: y_pred}, optimizer=optimizer)
if weights:
model.load_weights(weights)
# captures output of softmax so we can decode the output during visualization
test_func = K.function([input_data], [y_pred])
return model, test_func
### Keras functional API implementation of the CRNN model
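# Same CNN + BiLSTM + CTC idea as the subclass above, but here the CTC loss is
# wrapped in a Lambda layer so the model can be compiled with a dummy loss.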
def CRNN_model(weights=None):
inputs = Input(name="the_input", shape=(32, None, 3), dtype="float32")
x = Conv2D(64, 3, padding="same", name="conv2d_0")(inputs)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D((2, 2), (2, 2), name="pool2d_0")(x)
x = Conv2D(128, 3, padding="same", name="conv2d_1")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D((2, 2), (2, 2), name="pool2d_1")(x)
x = Conv2D(256, 3, padding="same", name="conv2d_2")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D((2, 1), (2, 1), name="pool2d_2")(x)
x = Conv2D(512, 3, padding="same", name="conv2d_3")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D((2, 1), (2, 1), name="pool2d_3")(x)
x = Conv2D(512, 3, padding="same", name="conv2d_4")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D((2, 1), (2, 1), name="pool2d_4")(x)
x = tf.squeeze(x, axis=1)
# [batch width_seq depth_chanel]
x = Bidirectional(LSTM(256, return_sequences=True), name="bidirectional_1")(x)
x = Bidirectional(LSTM(256, return_sequences=True), name="bidirectional_2")(x)
x = LSTM(512, return_sequences=True)(x)
x = Dense(len(ALPHABET) + 1)(x)
y_pred = Activation("softmax", name="softmax")(x)
Model(inputs=inputs, outputs=y_pred).summary()
labels = Input(name="the_labels", shape=[None], dtype="float32")
input_length = Input(name="input_length", shape=[1], dtype="int64")
label_length = Input(name="label_length", shape=[1], dtype="int64")
# Keras doesn't currently support loss funcs with extra parameters
# so CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")(
[y_pred, labels, input_length, label_length]
)
sgd = Adam(learning_rate=0.001)
model = Model(inputs=[inputs, labels, input_length, label_length], outputs=loss_out)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={"ctc": lambda y_true, y_pred: y_pred}, optimizer=sgd)
if weights:
model.load_weights(weights)
test_func = K.function([inputs], [y_pred])
return model, test_func
def get_CResRNN(weights=None):
inputs = Input(name="the_input", shape=(32, None, 3), dtype="float32")
x = Conv2D(64, 7, padding="same", name="conv2d_0")(inputs)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x_orig = x
    x = Conv2D(64, 3, padding="same", name="conv2d_0_1")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(64, 3, padding="same", name="conv2d_0_2")(x)
x = BatchNormalization()(x)
x = Add()([x, x_orig])
x = Activation("relu")(x)
x = MaxPooling2D((2, 2), (2, 2), name="pool2d_0")(x)
x = Conv2D(128, 3, padding="same", name="conv2d_1_0")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x_orig = x
x = Conv2D(128, 3, padding="same", name="conv2d_1_1")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(128, 3, padding="same", name="conv2d_1_2")(x)
x = BatchNormalization()(x)
x = Add()([x, x_orig])
x = Activation("relu")(x)
x = MaxPooling2D((2, 2), (2, 2), name="pool2d_1")(x)
x = Conv2D(256, 3, padding="same", name="conv2d_2_0")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x_orig = x
x = Conv2D(256, 3, padding="same", name="conv2d_2_1")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(256, 3, padding="same", name="conv2d_2_2")(x)
x = BatchNormalization()(x)
x = Add()([x, x_orig])
x = Activation("relu")(x)
x = MaxPooling2D((2, 1), (2, 1), name="pool2d_2")(x)
x = Conv2D(512, 3, padding="same", name="conv2d_3_0")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x_orig = x
x = Conv2D(512, 3, padding="same", name="conv2d_3_1")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(512, 3, padding="same", name="conv2d_3_2")(x)
x = BatchNormalization()(x)
x = Add()([x, x_orig])
x = Activation("relu")(x)
x = MaxPooling2D((2, 1), (2, 1), name="pool2d_3")(x)
x_orig = x
x = Conv2D(512, 3, padding="same", name="conv2d_4_1")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(512, 3, padding="same", name="conv2d_4_2")(x)
x = BatchNormalization()(x)
x = Add()([x, x_orig])
x = Activation("relu")(x)
x = MaxPooling2D((2, 1), (2, 1), name="pool2d_4")(x)
x = tf.squeeze(x, axis=1)
# [batch width_seq depth_chanel]
x = Bidirectional(LSTM(256, return_sequences=True), name="bidirectional_1")(x)
x = Bidirectional(LSTM(256, return_sequences=True), name="bidirectional_2")(x)
x = LSTM(512, return_sequences=True)(x)
x = Dense(len(ALPHABET) + 1)(x)
y_pred = Activation("softmax", name="softmax")(x)
Model(inputs=inputs, outputs=y_pred).summary()
labels = Input(name="the_labels", shape=[None], dtype="float32")
input_length = Input(name="input_length", shape=[1], dtype="int64")
label_length = Input(name="label_length", shape=[1], dtype="int64")
    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")(
[y_pred, labels, input_length, label_length]
)
sgd = Adam(learning_rate=0.0001,)
model = Model(inputs=[inputs, labels, input_length, label_length], outputs=loss_out)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={"ctc": lambda y_true, y_pred: y_pred}, optimizer=sgd)
if weights:
model.load_weights(weights)
test_func = K.function([inputs], [y_pred])
return model, test_func
| 32.687961 | 157 | 0.619738 |
4a22adb132ab1e93bb2141f1fd44e5b72088716f | 9,513 | py | Python | scripts/lib/xpedite/jupyter/driver.py | robber-m/Xpedite | 18e58bd0fac9ec7620fdeae1e7d1c0f8dd1f03f7 | ["Apache-2.0", "BSD-3-Clause"] | 99 | 2018-06-29T23:22:36.000Z | 2020-12-10T23:40:10.000Z | scripts/lib/xpedite/jupyter/driver.py | robber-m/Xpedite | 18e58bd0fac9ec7620fdeae1e7d1c0f8dd1f03f7 | ["Apache-2.0", "BSD-3-Clause"] | 105 | 2018-07-01T23:10:15.000Z | 2020-09-26T16:40:36.000Z | scripts/lib/xpedite/jupyter/driver.py | Mani-D/Xpedite | ce0811c0679e797029b3828a348fba07c6c92f77 | ["Apache-2.0", "BSD-3-Clause"] | 31 | 2018-06-30T00:28:46.000Z | 2020-11-06T07:29:03.000Z |
"""
Driver to integrate Xpedite with Jupyter shell.
This module provides
1. logic to store xpedite results in a jupyter notebook
2. ability to launch a jupyter instance with xpedite specific configuration
3. code to generate histogram and profile summary cells
Author: Dhruv Shekhawat, Morgan Stanley
"""
import os
import time
import copy
import logging
import tempfile
import nbformat
from enum import Enum
from nbformat import v4 as nbf
from xpedite.util import formatHumanReadable
from xpedite.util import compressText
from xpedite.types import InvariantViloation
from xpedite.jupyter.context import Context
from xpedite.jupyter import PROFILES_KEY
LOGGER = logging.getLogger(__name__)
class Action(Enum):
  """Enumeration for jupyter shell actions"""
def __str__(self):
return str(self.value)
Init = 'init'
Load = 'load'
class D3Flot(object):
  """Holds data to build d3 histogram visualization"""
def __init__(self):
self.xAxisValues = []
self.xAxisLabels = []
self.xyCoords = []
self.legends = []
def toDict(self):
"""Returns a dict representation of this object"""
d3FlotDict = {'xAxisValues': self.xAxisValues, 'xAxisLabels': self.xAxisLabels,
'xyCoords' : self.xyCoords, 'legends' : self.legends
}
return d3FlotDict
def buildD3Flot(cell):
"""
Method to store data for flot generation. In flot.js, xAxisValues are used
to locate where exactly xAxisLabels need to be placed. xyCoords is a list of
(x,y) dicts created by interleaving the (x,y) data of every run in order.
  This helps sort the x axis values and compare the bars of different runs at
  close proximity (in order, side by side). As we are guaranteed to have runs
placed in order, their coloring can be accomplished using formula:
barColor = colorList[barNum % num of runs].
Returns a d3Flot object.
"""
d3Flot = D3Flot()
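  # The interleaving below yields len(runs) * len(points) coordinates, ordered
  # point by point so that bars from different runs end up side by side.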
for tick in cell.flot.options['xaxis']['ticks']:
d3Flot.xAxisValues.append(tick[0])
d3Flot.xAxisLabels.append(tick[1])
for coord in range(0, len(cell.flot.data[0]['data'])):
for run, _ in enumerate(cell.flot.data):
xyCoord = {}
xyCoord['x'] = cell.flot.data[run]['data'][coord][0]
xyCoord['y'] = cell.flot.data[run]['data'][coord][1]
d3Flot.xyCoords.append(xyCoord)
for legend in cell.flot.data:
d3Flot.legends.append(legend['label'])
return d3Flot
def buildReportLink(reportKey, action):
"""Returns an url to uniquely identify a report"""
return '/xpedite?{}={{0}}&reportKey={}&action={}'.format(Context.notebookPathKey, reportKey, action)
def buildReportCells(nb, result, dataFilePath):
"""
Method to build the report cells. Populates the
metadata to be stored in init cell and preloads
source code for creating flots and html links
Returns the total num of categories in a run.
"""
from xpedite.jupyter.snippetsBuilder import buildSnippets
from xpedite.jupyter.xpediteData import XpediteDataFactory
from xpedite.jupyter.templates import loadCategoryMarkup
nb['cells'] = []
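  # One notebook code cell is emitted per profile category; the heavy report HTML
  # is stored in the side-car xpedite data file and only linked from the notebook.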
d3Flots = []
flotCode = loadCategoryMarkup()
reportCount = 0
xpdf = XpediteDataFactory(dataFilePath)
xpdf.appendRecord('envReport', 'environment report', result.envReport.zContent)
xpdProfiles = copy.deepcopy(result.profiles)
xpdProfiles.transactionRepo = None
xpdf.appendRecord(PROFILES_KEY, 'xpedite profiles', xpdProfiles)
# create and compress snippets
snippetData = buildSnippets(xpdProfiles)
zSnippetData = compressText(snippetData)
xpdf.appendRecord('snippets', 'snippets', zSnippetData)
cellNum = None
for cellNum, cell in enumerate(result.reportCells):
linksCode = ''
d3Flot = buildD3Flot(cell)
# populate create html links for reports
reportNum = None
for reportNum, report in enumerate(cell.htmlReport):
reportCount += 1
xpdKey = 'report{}'.format(reportCount)
linksCode += '<li><a href={} target="_blank">{}</a></li>'.format(
buildReportLink(xpdKey, Action.Load), report.description
)
xpdf.appendRecord(xpdKey, 'htmlReport', report.zContent)
# populate init cell metadata
d3Flots.append(d3Flot.toDict())
# fill notebook cells with flot + report links code
try:
cellCode = flotCode.format(
name=cell.flot.title, description=cell.flot.description,
cellNum=cellNum, reportNum=reportNum + 1, linksCode=linksCode
)
except TypeError:
typeErr = 'Number of placeholders in cell code string do not match the number of args supplied'
LOGGER.exception(typeErr)
raise InvariantViloation(typeErr)
nb['cells'].append(
nbf.new_code_cell(source=cellCode, metadata={
'init_cell': True, 'hide_input': True, 'editable': False, 'deletable': True
})
)
xpdf.commit()
return cellNum, d3Flots
def buildInitCell(nb, numOfCategories, d3Flots, appName, runId):
"""
Method to build the init cell which contains the intro,
serialized transactions object and metadata for generating reports
"""
from xpedite.jupyter.templates import loadInitCell
initCode = loadInitCell()
try:
envLink = buildReportLink('envReport', Action.Load)
initCode = initCode.format(
envLink=envLink, appName=appName, categoryCount=numOfCategories + 1, runId=runId
)
except TypeError:
typeErr = 'Number of placeholders in init code string do not match the number of args supplied'
LOGGER.exception(typeErr)
raise InvariantViloation(typeErr)
nb['cells'] = [nbf.new_code_cell(source=initCode, metadata={'init_cell': True, 'isInit': '0xFFFFFFFFA5A55A5DUL',\
'hide_input': True, 'editable': False, 'deletable': False,\
'd3Flots': d3Flots})] + nb['cells']
def buildNotebook(appName, result, notebookPath, dataFilePath, runId):
"""
Method to build .ipynb notebook with init code
cell for profiles and one report cell per category.
"""
begin = time.time()
LOGGER.info('generating notebook %s -> ', os.path.basename(notebookPath))
nb = nbf.new_notebook()
numOfCategories, d3Flots = buildReportCells(nb, result, dataFilePath)
buildInitCell(nb, numOfCategories, d3Flots, appName, runId)
try:
with open(notebookPath, 'w') as reportFile:
nbformat.write(nb, reportFile)
notebookSize = formatHumanReadable(os.path.getsize(notebookPath))
elapsed = time.time() - begin
LOGGER.completed('completed %s in %0.2f sec.', notebookSize, elapsed)
return True
except IOError:
LOGGER.exception('Could not write to the notebook(.ipynb) file')
return False
def launchJupyter(homeDir):
"""
Method to set env vars for overriding jup config, adding
python path and extensions, and finally launching jupyter
"""
from xpedite.jupyter import SHELL_PREFIX
from xpedite.dependencies import binPath
LOGGER.info('')
pyPath = os.path.dirname(binPath('python')) + os.pathsep + os.environ['PATH']
initPath = os.path.dirname(__file__)
jupyterEnv = os.environ
jupyterEnv[Context.xpediteHomeKey] = os.path.abspath(homeDir)
jupyterEnv['JUPYTER_PATH'] = os.path.join(initPath, 'data/extensions/')
jupyterEnv['JUPYTER_CONFIG_DIR'] = os.path.join(initPath, 'data/config/')
jupyterEnv['XPEDITE_PATH'] = os.path.abspath(os.path.join(initPath, '../../'))
jupyterEnv['PATH'] = pyPath
jupyterEnv['HOME'] = tempfile.mkdtemp(prefix=SHELL_PREFIX, dir='/tmp')
jupyterBinary = binPath('jupyter')
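  # os.execle replaces the current process with the jupyter notebook server,
  # handing it the customized environment assembled above.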
os.execle(jupyterBinary, 'Xpedite', 'notebook', '--no-browser', '--notebook-dir='+homeDir, jupyterEnv)
def validatePath(homeDir, reportName):
"""Validates the path to store xpedite notebook and data files"""
from xpedite.jupyter import DATA_DIR, DATA_FILE_EXT, TEMP_PREFIX, NOTEBOOK_EXT
if homeDir is None:
homeDir = tempfile.mkdtemp(prefix=TEMP_PREFIX, dir='/tmp')
LOGGER.warn('Xpedite home directory not found in profileInfo (using temp dir).\n'
'To keep all reports in one place, set variable homeDir in profileInfo to a valid path.')
dataDir = os.path.join(homeDir, DATA_DIR)
notebookPath = '{}/{}{}'.format(homeDir, reportName, NOTEBOOK_EXT)
dataFilePath = '{}/{}{}'.format(dataDir, reportName, DATA_FILE_EXT)
if os.path.isfile(notebookPath) or os.path.isfile(dataFilePath):
    errMsg = """Can't overwrite existing files. Check and remove
\t\t 1. Notebook file - {}
\t\t 2. Xpedite data file - {}""".format(notebookPath, dataFilePath)
LOGGER.error(errMsg)
raise Exception(errMsg)
if not os.path.exists(dataDir):
LOGGER.info('creating xpedite data directory %s', dataDir)
os.makedirs(dataDir)
return notebookPath, dataFilePath, homeDir
class Driver(object):
"""Xpedite driver to render profile results in jupyter shell"""
@staticmethod
def render(profileInfo, report, leanReports=None, cprofile=None): # pylint: disable=unused-argument
"""Runs a profile session and renders results in a jupyter shell"""
from xpedite.jupyter.result import Result
result = Result(report)
notebookPath, dataFilePath, profileInfo.homeDir = validatePath(profileInfo.homeDir, result.reportName)
if result.reportCells:
rc = buildNotebook(profileInfo.appName, result, notebookPath, dataFilePath, result.runId)
if cprofile:
cprofile.disable()
if rc:
launchJupyter(profileInfo.homeDir)
else:
LOGGER.error('Aborting profile - no txn collected. Did you generate any transactions ?')
| 37.305882 | 115 | 0.709871 |
4a22adebb797e1a3f5f0a340e2a65cbae44461fb | 1,908 | py | Python | app/sensors/enabled/hih6130.py | jsolodev/nettemp | e1e9605b853689822f3e9c2a8ae9bf02e266359c | ["MIT"] | 51 | 2015-01-03T01:37:25.000Z | 2021-11-03T18:07:42.000Z | app/sensors/enabled/hih6130.py | jsolodev/nettemp | e1e9605b853689822f3e9c2a8ae9bf02e266359c | ["MIT"] | 18 | 2015-03-06T18:46:51.000Z | 2021-04-02T08:02:01.000Z | app/sensors/enabled/hih6130.py | jsolodev/nettemp | e1e9605b853689822f3e9c2a8ae9bf02e266359c | ["MIT"] | 51 | 2015-02-04T18:53:54.000Z | 2022-02-16T20:40:45.000Z |
import smbus
import os.path
import sys
from datetime import datetime
dir=(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..','..','..')))
sys.path.append(dir+'/app')
from local_nettemp import insert
__all__ = ['HIH6130']
class HIH6130:
''' HIH6130() returns an instance of the RHT sensor with default address of 0x27. '''
def __init__(self, address = 0x27):
self.address = address
self.status = None
self.rh = None
self.t = None
self._buffer = None
self.timestamp = None
if len(sys.argv) > 1:
nbus = sys.argv[1]
elif os.path.exists("/dev/i2c-0"):
nbus = "0"
elif os.path.exists("/dev/i2c-1"):
nbus = "1"
elif os.path.exists("/dev/i2c-2"):
nbus = "2"
elif os.path.exists("/dev/i2c-3"):
nbus = "3"
try:
self.i2c = smbus.SMBus(int(nbus))
except:
raise IOError("Could not find i2c device.")
return
def read(self):
''' updates rh, t, and timestamp for the HIH6130 instance '''
try:
self._buffer = self.i2c.read_i2c_block_data(self.address, 1, 4)
except:
raise IOError("Could not read from i2c device located at %s." % self.address )
self.timestamp = datetime.now()
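    # Decode the 4-byte reply: the top two bits of byte 0 carry the status flags,
    # the remaining 14 bits of bytes 0-1 are relative humidity scaled to 0-100 %RH,
    # and 14 bits from bytes 2-3 are temperature scaled to the -40..125 C range.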
self.status = self._buffer[0] >> 6 & 0x03
self.rh = round(((self._buffer[0] & 0x3f) << 8 | self._buffer[1]) * 100.0 / (2**14 - 1), 2)
self.t = round((float((self._buffer[2] << 6) + (self._buffer[3] >> 2)) / (2**14 - 1)) * 165.0 - 40, 2)
return
try:
rht = HIH6130()
rht.read()
#print ("{0}\n{1}".format(rht.rh, rht.t))
rom = "i2c_27_temp"
value = '{0:0.2f}'.format(rht.t)
name = 'hih6130-temp'
type = 'temp'
data=insert(rom, type, value, name)
data.request()
rom = "i2c_27_humid"
value = '{0:0.2f}'.format(rht.rh)
name = 'hih6130-humid'
type = 'humid'
data=insert(rom, type, value, name)
data.request()
except:
print ("No HIH6130")
| 23.268293 | 104 | 0.598008 |
4a22ae81a345d82fe72df27c4b96efbee9fa761e | 1,765 | py | Python | backend/api/views/project.py | daobook/doccano | 45122687740f74f19e2578c5cf28507f0839bf16 | ["MIT"] | null | null | null | backend/api/views/project.py | daobook/doccano | 45122687740f74f19e2578c5cf28507f0839bf16 | ["MIT"] | null | null | null | backend/api/views/project.py | daobook/doccano | 45122687740f74f19e2578c5cf28507f0839bf16 | ["MIT"] | null | null | null |
from django.conf import settings
from rest_framework import generics, status
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from members.permissions import IsInProjectReadOnlyOrAdmin
from ..models import Project
from ..serializers import ProjectPolymorphicSerializer
class ProjectList(generics.ListCreateAPIView):
serializer_class = ProjectPolymorphicSerializer
pagination_class = None
def get_permissions(self):
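        # Any authenticated user may list projects; creating (and deleting)
        # projects additionally requires admin/staff privileges.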
if self.request.method == 'GET':
self.permission_classes = [IsAuthenticated, ]
else:
self.permission_classes = [IsAuthenticated & IsAdminUser]
return super().get_permissions()
def get_queryset(self):
return Project.objects.filter(role_mappings__user=self.request.user)
def perform_create(self, serializer):
serializer.save(created_by=self.request.user)
def delete(self, request, *args, **kwargs):
delete_ids = request.data['ids']
projects = Project.objects.filter(
role_mappings__user=self.request.user,
role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,
pk__in=delete_ids
)
# Todo: I want to use bulk delete.
# But it causes the constraint error.
# See https://github.com/django-polymorphic/django-polymorphic/issues/229
for project in projects:
project.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Project.objects.all()
serializer_class = ProjectPolymorphicSerializer
lookup_url_kwarg = 'project_id'
permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]
| 36.020408 | 81 | 0.729745 |
4a22af2b446e349ab69c87d8136b05831511abbc | 80,879 | py | Python | qa/L0_lifecycle/lifecycle_test.py | szalpal/server | 85bf86813bce30a6b8e9f66bde057e2145530b7e | ["BSD-3-Clause"] | null | null | null | qa/L0_lifecycle/lifecycle_test.py | szalpal/server | 85bf86813bce30a6b8e9f66bde057e2145530b7e | ["BSD-3-Clause"] | null | null | null | qa/L0_lifecycle/lifecycle_test.py | szalpal/server | 85bf86813bce30a6b8e9f66bde057e2145530b7e | ["BSD-3-Clause"] | null | null | null |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from builtins import range
from future.utils import iteritems
import os
import shutil
import time
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
import tritongrpcclient as grpcclient
import tritonhttpclient as httpclient
from tritonclientutils import InferenceServerException
class LifeCycleTest(tu.TestResultCollector):
def _infer_success_models(self,
model_base_names,
versions,
tensor_shape,
swap=False):
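        # Helper: for every base model, check readiness of each requested version
        # over both HTTP and GRPC, then run a basic inference against each version.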
for base_name in model_base_names:
try:
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
# FIXME is_server_ready should be true here DLIS-1296
# self.assertTrue(triton_client.is_server_ready())
for v in versions:
self.assertTrue(
triton_client.is_model_ready(model_name, str(v)))
for v in versions:
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=v,
swap=(swap or (v == 3)))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_parse_error_noexit(self):
# Server was started with invalid args and
# --exit-on-error=false so expect it to be running with
# SERVER_FAILED_TO_INITIALIZE status.
# Server is not live and not ready regardless of --strict-readiness
try:
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
self.assertFalse(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
md = triton_client.get_server_metadata()
self.assertEqual(os.environ["TRITON_SERVER_VERSION"], md.version)
self.assertEqual("triton", md.name)
except InferenceServerException as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
self.assertFalse(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
md = triton_client.get_server_metadata()
self.assertEqual(os.environ["TRITON_SERVER_VERSION"], md['version'])
self.assertEqual("triton", md['name'])
except InferenceServerException as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_parse_error_modelfail(self):
# --strict-readiness=true so server is live but not ready
tensor_shape = (1, 16)
# Server was started but with a model that fails to load
try:
model_name = tu.get_model_name('graphdef', np.float32, np.float32,
np.float32)
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
self.assertTrue(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
self.assertFalse(triton_client.is_model_ready(model_name, "1"))
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
self.assertTrue(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
self.assertFalse(triton_client.is_model_ready(model_name, "1"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Inferencing with the missing model should fail.
try:
iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
np.float32, np.float32)
self.assertTrue(
False, "expected error for unavailable model " + model_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
))
# And other models should be loaded successfully
try:
for base_name in ["savedmodel", 'netdef']:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
self.assertTrue(
triton_client.is_model_ready(model_name, "1"))
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=1)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_parse_error_modelfail_nostrict(self):
# --strict-readiness=false so server is live and ready
tensor_shape = (1, 16)
# Server was started but with a model that fails to load
try:
model_name = tu.get_model_name('graphdef', np.float32, np.float32,
np.float32)
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(triton_client.is_model_ready(model_name, "1"))
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(triton_client.is_model_ready(model_name, "1"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Inferencing with the missing model should fail.
try:
iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
np.float32, np.float32)
self.assertTrue(
False, "expected error for unavailable model " + model_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
))
# And other models should be loaded successfully
try:
for base_name in ["savedmodel", 'netdef']:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
self.assertTrue(
triton_client.is_model_ready(model_name, "1"))
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=1)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_parse_error_no_model_config(self):
tensor_shape = (1, 16)
# Server was started but with a model that fails to be polled
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
model_name = tu.get_model_name('graphdef', np.float32,
np.float32, np.float32)
# expecting ready because not strict readiness
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
md = triton_client.get_model_metadata(model_name, "1")
self.assertTrue(
False, "expected model '" + model_name +
"' to be ignored due to polling failure")
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'graphdef_float32_float32_float32' is not found"
))
# And other models should be loaded successfully
try:
for base_name in ["savedmodel", 'netdef']:
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
self.assertTrue(triton_client.is_model_ready(model_name, "1"))
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=1)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_init_error_modelfail(self):
# --strict-readiness=true so server is live but not ready
# Server was started but with models that fail to load
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
self.assertTrue(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
# one model uses sequence batcher while the other uses dynamic batcher
model_names = [
"custom_sequence_int32", "custom_int32_int32_int32"
]
for model_name in model_names:
self.assertFalse(triton_client.is_model_ready(model_name))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# And other models should be loaded successfully
try:
for base_name in ["graphdef", "savedmodel", 'netdef']:
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
self.assertTrue(triton_client.is_model_ready(model_name))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
tensor_shape = (1, 16)
for base_name in ["graphdef", "savedmodel", 'netdef']:
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=1)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_parse_error_model_no_version(self):
# --strict-readiness=true so server is live but not ready
tensor_shape = (1, 16)
# Server was started but with a model that fails to load
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
self.assertTrue(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
model_name = tu.get_model_name('graphdef', np.float32,
np.float32, np.float32)
self.assertFalse(triton_client.is_model_ready(model_name))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Sanity check that other models are loaded properly
try:
for base_name in ["savedmodel", "netdef"]:
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
self.assertTrue(triton_client.is_model_ready(model_name))
for version in ["1", "3"]:
model_name = tu.get_model_name("plan", np.float32,
np.float32, np.float32)
self.assertTrue(
triton_client.is_model_ready(model_name, version))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
for base_name in ["savedmodel", "netdef"]:
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True)
for version in [1, 3]:
iu.infer_exact(self,
'plan',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
np.float32, np.float32)
self.assertTrue(
False, "expected error for unavailable model " + model_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
))
def test_parse_ignore_zero_prefixed_version(self):
tensor_shape = (1, 16)
# Server was started but only version 1 is loaded
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
model_name = tu.get_model_name('savedmodel', np.float32,
np.float32, np.float32)
self.assertTrue(triton_client.is_model_ready(model_name, "1"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
# swap=False for version 1
iu.infer_exact(self,
'savedmodel',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_model_load_unload(self):
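        # Model repository polling flow: copy a model in and expect it to load,
        # remove it and expect it to unload, then re-add it and verify its
        # statistics start from zero again.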
tensor_shape = (1, 16)
savedmodel_name = tu.get_model_name('savedmodel', np.float32,
np.float32, np.float32)
netdef_name = tu.get_model_name('netdef', np.float32, np.float32,
np.float32)
# Make sure savedmodel model is not in the status (because
# initially it is not in the model repository)
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "3"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Add savedmodel model to the model repository and give it time to
# load. Make sure that it has a status and is ready.
try:
shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
time.sleep(5) # wait for model to load
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertTrue(
triton_client.is_model_ready(savedmodel_name, "3"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference on the just loaded model
try:
iu.infer_exact(self,
'savedmodel',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Make sure savedmodel has execution stats
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
stats = triton_client.get_inference_statistics(savedmodel_name)
self.assertEqual(len(stats["model_stats"]), 2)
for idx in range(len(stats["model_stats"])):
self.assertEqual(stats["model_stats"][idx]["name"],
savedmodel_name)
if stats["model_stats"][idx]["version"] == "1":
self.assertEqual(
stats["model_stats"][idx]["inference_stats"]["success"]
["count"], 0)
else:
self.assertNotEqual(
stats["model_stats"][idx]["inference_stats"]["success"]
["count"], 0)
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
stats = triton_client.get_inference_statistics(savedmodel_name)
self.assertEqual(len(stats.model_stats), 2)
for idx in range(len(stats.model_stats)):
self.assertEqual(stats.model_stats[idx].name, savedmodel_name)
if stats.model_stats[idx].version == "1":
self.assertEqual(
stats.model_stats[idx].inference_stats.success.count, 0)
else:
self.assertNotEqual(
stats.model_stats[idx].inference_stats.success.count, 0)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Remove savedmodel model from the model repository and give it
# time to unload. Make sure that it is no longer available.
try:
shutil.rmtree("models/" + savedmodel_name)
time.sleep(5) # wait for model to unload
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "3"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Model is removed so inference should fail
try:
iu.infer_exact(self,
'savedmodel',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True)
self.assertTrue(
False,
"expected error for unavailable model " + savedmodel_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'savedmodel_float32_float32_float32' has no available versions"
))
# Add back the same model. The status/stats should be reset.
try:
shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
time.sleep(5) # wait for model to load
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertTrue(
triton_client.is_model_ready(savedmodel_name, "3"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
stats = triton_client.get_inference_statistics(savedmodel_name)
self.assertEqual(len(stats["model_stats"]), 2)
self.assertEqual(stats["model_stats"][0]["name"], savedmodel_name)
self.assertEqual(stats["model_stats"][1]["name"], savedmodel_name)
self.assertEqual(
stats["model_stats"][0]["inference_stats"]["success"]["count"],
0)
self.assertEqual(
stats["model_stats"][1]["inference_stats"]["success"]["count"],
0)
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
stats = triton_client.get_inference_statistics(savedmodel_name)
self.assertEqual(len(stats.model_stats), 2)
self.assertEqual(stats.model_stats[0].name, savedmodel_name)
self.assertEqual(stats.model_stats[1].name, savedmodel_name)
self.assertEqual(stats.model_stats[0].inference_stats.success.count,
0)
self.assertEqual(stats.model_stats[1].inference_stats.success.count,
0)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Remove netdef model from the model repository and give it
# time to unload. Make sure that it is unavailable.
try:
shutil.rmtree("models/" + netdef_name)
time.sleep(5) # wait for model to unload
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertTrue(
triton_client.is_model_ready(savedmodel_name, "3"))
self.assertFalse(triton_client.is_model_ready(netdef_name, "1"))
self.assertFalse(triton_client.is_model_ready(netdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Model is removed so inference should fail
try:
iu.infer_exact(self,
'netdef',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True)
self.assertTrue(
False, "expected error for unavailable model " + netdef_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'netdef_float32_float32_float32' has no available versions"
))
def test_dynamic_model_load_unload_disabled(self):
tensor_shape = (1, 16)
savedmodel_name = tu.get_model_name('savedmodel', np.float32,
np.float32, np.float32)
netdef_name = tu.get_model_name('netdef', np.float32, np.float32,
np.float32)
# Make sure savedmodel model is not in the status (because
# initially it is not in the model repository)
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "3"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Add savedmodel model to the model repository and give it time to
# load. But it shouldn't load because dynamic loading is disabled.
try:
shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
time.sleep(5) # wait for model to load
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "3"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference which should fail because the model isn't there
try:
iu.infer_exact(self,
'savedmodel',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True)
self.assertTrue(
False,
"expected error for unavailable model " + savedmodel_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'savedmodel_float32_float32_float32' is not found"
))
# Remove one of the original models from the model repository.
# Unloading is disabled so it should remain available in the status.
try:
shutil.rmtree("models/" + netdef_name)
time.sleep(5) # wait for model to unload (but it shouldn't)
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "3"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference to make sure model still being served even
# though deleted from model repository
try:
iu.infer_exact(self,
'netdef',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_version_load_unload(self):
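        # Same polling flow as above, exercised at the granularity of individual
        # model versions rather than whole models.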
tensor_shape = (1, 16)
graphdef_name = tu.get_model_name('graphdef', np.int32, np.int32,
np.int32)
# There are 3 versions. Make sure that all have status and are
# ready.
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "2"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference on version 1 to make sure it is available
try:
iu.infer_exact(self,
'graphdef',
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
swap=False,
model_version=1)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Make sure only version 1 has execution stats in the status.
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
stats = triton_client.get_inference_statistics(graphdef_name)
self.assertEqual(len(stats["model_stats"]), 3)
for idx in range(len(stats["model_stats"])):
self.assertEqual(stats["model_stats"][idx]["name"],
graphdef_name)
if stats["model_stats"][idx]["version"] == "1":
self.assertNotEqual(
stats["model_stats"][idx]["inference_stats"]["success"]
["count"], 0)
else:
self.assertEqual(
stats["model_stats"][idx]["inference_stats"]["success"]
["count"], 0)
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
stats = triton_client.get_inference_statistics(graphdef_name)
self.assertEqual(len(stats.model_stats), 3)
for idx in range(len(stats.model_stats)):
self.assertEqual(stats.model_stats[idx].name, graphdef_name)
if stats.model_stats[idx].version == "1":
self.assertNotEqual(
stats.model_stats[idx].inference_stats.success.count, 0)
else:
self.assertEqual(
stats.model_stats[idx].inference_stats.success.count, 0)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Remove version 1 from the model repository and give it time to
# unload. Make sure that it is unavailable.
try:
shutil.rmtree("models/" + graphdef_name + "/1")
time.sleep(5) # wait for version to unload
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(graphdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "2"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Version is removed so inference should fail
try:
iu.infer_exact(self,
'graphdef',
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
swap=False,
model_version=1)
self.assertTrue(
False, "expected error for unavailable model " + graphdef_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'graphdef_int32_int32_int32' version 1 is not at ready state"
))
# Add another version to the model repository.
try:
shutil.copytree("models/" + graphdef_name + "/2",
"models/" + graphdef_name + "/7")
time.sleep(5) # wait for version to load
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(graphdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "2"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "3"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "7"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_version_load_unload_disabled(self):
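        # Same repository edits as above but with dynamic loading/unloading
        # disabled: adding version 7 must not load it and removing version 1
        # must not unload it.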
tensor_shape = (1, 16)
graphdef_name = tu.get_model_name('graphdef', np.int32, np.int32,
np.int32)
# Add a new version to the model repository and give it time to
# load. But it shouldn't load because dynamic loading is
# disabled.
try:
shutil.copytree("models/" + graphdef_name + "/2",
"models/" + graphdef_name + "/7")
time.sleep(5) # wait for model to load
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "2"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "3"))
self.assertFalse(
triton_client.is_model_ready(graphdef_name, "7"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Remove one of the original versions from the model repository.
# Unloading is disabled so it should remain available
# in the status.
try:
shutil.rmtree("models/" + graphdef_name + "/1")
time.sleep(5) # wait for version to unload (but it shouldn't)
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "1"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "2"))
self.assertTrue(triton_client.is_model_ready(
graphdef_name, "3"))
self.assertFalse(
triton_client.is_model_ready(graphdef_name, "7"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
        # Run inference to make sure the model is still being served even
        # though the version was deleted from the model repository
try:
iu.infer_exact(self,
'graphdef',
tensor_shape,
1,
np.int32,
np.int32,
np.int32,
swap=False,
model_version=1)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_model_modify(self):
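        # Modify config.pbtxt in place for the savedmodel and plan models and
        # verify they are reloaded: first with a wrong label file, then with
        # the correct config whose default version policy leaves only
        # version 3 available.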
models_base = ('savedmodel', 'plan')
models_shape = ((1, 16), (1, 16))
models = list()
for m in models_base:
models.append(
tu.get_model_name(m, np.float32, np.float32, np.float32))
# Make sure savedmodel and plan are in the status
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference on the model, both versions 1 and 3
for version in (1, 3):
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
        # Change the model configuration to use the wrong label file
for base_name, model_name in zip(models_base, models):
shutil.copyfile("config.pbtxt.wrong." + base_name,
"models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
        # After the reload, version 3 (the swapped version) of each model
        # should now report the wrong label.
        for model_name, model_shape in zip(models_base, models_shape):
            try:
                iu.infer_exact(self,
                               model_name,
                               model_shape,
                               1,
                               np.float32,
                               np.float32,
                               np.float32,
                               swap=True,
                               model_version=3,
                               output0_raw=False)
                self.assertTrue(
                    False,
                    "expected error for wrong label for " + model_name)
            except AssertionError as ex:
                self.assertTrue("'label9" in str(ex) and "!=" in str(ex),
                                str(ex))
        # Change the model configuration to use the correct label file and to
        # have the default version policy (so that only version 3 is available).
for base_name, model_name in zip(models_base, models):
shutil.copyfile("config.pbtxt." + base_name,
"models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
        # Attempt inferencing using version 1; it should fail since the
        # change in model policy makes that version no longer available.
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False,
model_version=1)
self.assertTrue(
False, "expected error for unavailable model " + model_name)
except Exception as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
# Version 3 should continue to work...
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True,
model_version=3)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_file_delete(self):
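        # Delete config.pbtxt entirely and verify the models are reloaded with
        # an autofilled config where only the latest version (3) is available.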
models_base = ('savedmodel', 'plan')
models_shape = ((1, 16), (1, 16))
models = list()
for m in models_base:
models.append(
tu.get_model_name(m, np.float32, np.float32, np.float32))
# Make sure savedmodel and plan are in the status
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference on the model, both versions 1 and 3
for version in (1, 3):
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
        # Delete the model configuration, which causes the model to be
        # re-loaded with an autofilled config. The autofilled config uses
        # the latest version policy, so only version 3 will be
        # available
for model_name in models:
os.remove("models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Only version 3 (latest) should work...
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True,
model_version=3)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False,
model_version=1)
                self.assertTrue(
                    False,
                    "expected error for unavailable model " + model_name)
except Exception as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
def test_multiple_model_repository_polling(self):
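        # With two polled model repositories, a model that appears in both
        # becomes unavailable until the duplicate is removed from one of them.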
model_shape = (1, 16)
savedmodel_name = tu.get_model_name('savedmodel', np.float32,
np.float32, np.float32)
# Models should be loaded successfully and infer
# successfully. Initially savedmodel only has version 1.
self._infer_success_models([
"savedmodel",
], (1,), model_shape)
self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
        # Add the savedmodel to the second model repository; this should
        # cause it to be unloaded due to duplication
shutil.copytree(savedmodel_name, "models_0/" + savedmodel_name)
time.sleep(5) # wait for models to reload
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
# Remove the savedmodel from the first model repository, the
# model from the second model repository should be loaded
# properly. In the second model repository savedmodel should
# have versions 1 and 3.
shutil.rmtree("models/" + savedmodel_name)
time.sleep(5) # wait for model to unload
self._infer_success_models(["savedmodel", "graphdef", 'netdef'], (1, 3),
model_shape)
def test_multiple_model_repository_control(self):
# similar to test_multiple_model_repository_polling, but the
# model load/unload is controlled by the API
model_shape = (1, 16)
savedmodel_name = tu.get_model_name("savedmodel", np.float32,
np.float32, np.float32)
model_bases = ['savedmodel', "graphdef", 'netdef']
# Initially models are not loaded
for base in model_bases:
try:
model_name = tu.get_model_name(base, np.float32, np.float32,
np.float32)
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertFalse(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Load all models, here we use GRPC
for base in model_bases:
try:
model_name = tu.get_model_name(base, np.float32, np.float32,
np.float32)
triton_client = grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)
triton_client.load_model(model_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Models should be loaded successfully and infer
# successfully. Initially savedmodel only has version 1.
self._infer_success_models([
"savedmodel",
], (1,), model_shape)
self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
        # Add the savedmodel to the second model repository. Because polling
        # is not enabled this doesn't change any model state; all models are
        # still loaded and available.
shutil.copytree(savedmodel_name, "models_0/" + savedmodel_name)
self._infer_success_models([
"savedmodel",
], (1,), model_shape)
self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
# Reload savedmodel which will cause it to unload because it
# is in 2 model repositories. Use HTTP here.
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
triton_client.load_model(savedmodel_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"failed to load '{}'".format(savedmodel_name)))
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "1"))
self.assertFalse(
triton_client.is_model_ready(savedmodel_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
# Remove the savedmodel from the first model repository and
# explicitly load savedmodel. The savedmodel from the second
# model repository should be loaded properly. In the second
# model repository savedmodel should have versions 1 and 3.
shutil.rmtree("models/" + savedmodel_name)
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
triton_client.load_model(savedmodel_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"failed to load '{}'".format(savedmodel_name)))
self._infer_success_models(["savedmodel", "graphdef", 'netdef'], (1, 3),
model_shape)
def test_model_control(self):
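        # Explicit model control via the load/unload APIs, including an
        # ensemble whose dependency is loaded automatically when the ensemble
        # is loaded and whose unloading forces the ensemble to unload.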
model_shape = (1, 16)
onnx_name = tu.get_model_name('onnx', np.float32, np.float32,
np.float32)
ensemble_prefix = "simple_"
ensemble_name = ensemble_prefix + onnx_name
# Make sure no models are loaded
for model_name in (onnx_name, ensemble_name):
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertFalse(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Load non-existent model
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
triton_client.load_model("unknown_model")
self.assertTrue(False, "expected unknown model failure")
except Exception as ex:
self.assertTrue(ex.message().startswith(
"failed to load 'unknown_model', no version is available"))
# Load ensemble model, the dependent model should be polled and loaded
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
triton_client.load_model(ensemble_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
self._infer_success_models([
"onnx",
], (1, 3), model_shape)
self._infer_success_models([
"simple_onnx",
], (1, 3),
model_shape,
swap=True)
# Delete model configuration for onnx, which will cause
# the autofiller to use the latest version policy so that only
# version 3 will be available if the models are re-loaded
for model_name in (onnx_name,):
os.remove("models/" + model_name + "/config.pbtxt")
self._infer_success_models([
"onnx",
], (1, 3), model_shape)
self._infer_success_models([
"simple_onnx",
], (1, 3),
model_shape,
swap=True)
# Reload models, only version 3 should be available for onnx
for model_name in (onnx_name, ensemble_name):
try:
triton_client = grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)
triton_client.load_model(model_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
self._infer_success_models([
"onnx",
], (3,), model_shape)
self._infer_success_models([
"simple_onnx",
], (1, 3),
model_shape,
swap=True)
for model_name in (onnx_name,):
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Unload non-existing model, nothing should happen
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
triton_client.unload_model("unknown_model")
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
        # Unload the model that the ensemble depends on; as a side effect,
        # the ensemble model will be forced to be unloaded
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
triton_client.unload_model(onnx_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
for model_name in (onnx_name, ensemble_name):
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertFalse(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
        # Explicitly unload the ensemble and load the model it depends on.
        # The ensemble model should not be reloaded because it was
        # explicitly unloaded.
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
triton_client.unload_model(ensemble_name)
triton_client.load_model(onnx_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
self._infer_success_models([
"onnx",
], (3,), model_shape)
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(ensemble_name, "1"))
self.assertFalse(
triton_client.is_model_ready(ensemble_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_multiple_model_repository_control_startup_models(self):
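        # Explicit model control with some models already loaded at startup
        # via the command line; exercises load/unload of ensembles and the
        # models they depend on across two model repositories.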
model_shape = (1, 16)
onnx_name = tu.get_model_name('onnx', np.float32, np.float32,
np.float32)
plan_name = tu.get_model_name('plan', np.float32, np.float32,
np.float32)
ensemble_prefix = "simple_"
onnx_ensemble_name = ensemble_prefix + onnx_name
plan_ensemble_name = ensemble_prefix + plan_name
# Make sure unloaded models are not in the status
for base in ("netdef",):
model_name = tu.get_model_name(base, np.float32, np.float32,
np.float32)
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertFalse(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# And loaded models work properly
self._infer_success_models([
"onnx",
], (1, 3), model_shape)
self._infer_success_models([
"simple_onnx",
], (1, 3),
model_shape,
swap=True)
self._infer_success_models([
"plan",
], (1, 3), model_shape)
# Load non-existing model
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
triton_client.load_model("unknown_model")
self.assertTrue(False, "expected unknown model failure")
except Exception as ex:
self.assertTrue(ex.message().startswith(
"failed to load 'unknown_model', no version is available"))
# Load plan ensemble model, the dependent model is already
# loaded via command-line
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
triton_client.load_model(plan_ensemble_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
self._infer_success_models([
"plan",
], (1, 3), model_shape)
self._infer_success_models([
"simple_plan",
], (1, 3),
model_shape,
swap=True)
# Delete model configuration, which will cause the autofiller
# to use the latest version policy so that only version 3 will
# be available if the models are re-loaded
os.remove("models/" + onnx_name + "/config.pbtxt")
self._infer_success_models([
"plan",
], (1, 3), model_shape)
self._infer_success_models([
"simple_plan",
], (1, 3),
model_shape,
swap=True)
# Reload onnx, only version 3 should be available
try:
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
triton_client.load_model(onnx_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
self._infer_success_models([
"onnx",
], (3,), model_shape)
self._infer_success_models([
"simple_onnx",
], (1, 3),
model_shape,
swap=True)
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(triton_client.is_model_ready(onnx_name, "1"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Unload non-existing model, nothing should happen
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
triton_client.unload_model("unknown_model")
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
        # Unload the onnx model; as a side effect, the ensemble model
        # will be forced to be unloaded
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
triton_client.unload_model(onnx_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
for model_name in [onnx_name, onnx_ensemble_name]:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertFalse(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
        # Explicitly unload the onnx ensemble and load the model it depends
        # on. The ensemble model should not be reloaded because it was
        # explicitly unloaded.
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
triton_client.unload_model(onnx_ensemble_name)
triton_client.load_model(onnx_name)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
self._infer_success_models([
"onnx",
], (3,), model_shape)
self._infer_success_models([
"plan",
], (1, 3), model_shape)
self._infer_success_models([
"simple_plan",
], (1, 3),
model_shape,
swap=True)
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(onnx_ensemble_name, "1"))
self.assertFalse(
triton_client.is_model_ready(onnx_ensemble_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_model_repository_index(self):
        # use model control EXPLICIT and --load-model to load a subset of the
        # models in the model repository
tensor_shape = (1, 16)
model_bases = ["graphdef", "savedmodel", "simple_savedmodel"]
# Sanity check on loaded models
# 3 models should be loaded:
# simple_savedmodel_float32_float32_float32
# savedmodel_float32_float32_float32
# graphdef_float32_float32_float32
for model_base in model_bases:
try:
model_name = tu.get_model_name(model_base, np.float32,
np.float32, np.float32)
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(triton_client.is_model_ready(model_name))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Check model repository index
# All models should be in ready state except netdef_float32_float32_float32
# which appears in two repositories.
model_bases.append("simple_graphdef")
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
index = triton_client.get_model_repository_index()
indexed = list()
self.assertEqual(len(index), 8)
for i in index:
indexed.append(i["name"])
if i["name"] == "netdef_float32_float32_float32":
self.assertEqual(i["state"], "UNAVAILABLE")
self.assertEqual(
i["reason"],
"model appears in two or more repositories")
for model_base in model_bases:
model_name = tu.get_model_name(model_base, np.float32,
np.float32, np.float32)
self.assertTrue(model_name in indexed)
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
index = triton_client.get_model_repository_index()
indexed = list()
self.assertEqual(len(index.models), 8)
for i in index.models:
indexed.append(i.name)
if i.name == "netdef_float32_float32_float32":
self.assertEqual(i.state, "UNAVAILABLE")
self.assertEqual(
i.reason, "model appears in two or more repositories")
for model_base in model_bases:
model_name = tu.get_model_name(model_base, np.float32,
np.float32, np.float32)
self.assertTrue(model_name in indexed)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
if __name__ == '__main__':
unittest.main()
| 48.085018 | 107 | 0.518503 |
4a22af2fffc3e4b580d7a2fdcaa0bf58656fde7a | 3,869 | py | Python | eval.py | dimenerno/cnn-text-korean | 39df71c6d8a05b9ea8becd2a70267b5fc442941c | [
"Apache-2.0"
] | 77 | 2016-12-16T02:06:38.000Z | 2021-07-19T04:34:01.000Z | eval.py | dimenerno/cnn-text-korean | 39df71c6d8a05b9ea8becd2a70267b5fc442941c | [
"Apache-2.0"
] | 1 | 2019-03-25T06:41:10.000Z | 2019-03-25T06:41:10.000Z | eval.py | dimenerno/cnn-text-korean | 39df71c6d8a05b9ea8becd2a70267b5fc442941c | [
"Apache-2.0"
] | 63 | 2016-12-16T02:06:50.000Z | 2020-12-07T01:39:01.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
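# Evaluation script: restores the latest (or a specified) training checkpoint,
# maps the raw text through the saved vocabulary, runs batched predictions and
# writes them together with the input text to prediction.csv.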
import json
import tensorflow as tf
import numpy as np
import os
import data_helpers
from multi_class_data_loader import MultiClassDataLoader
from word_data_processor import WordDataProcessor
import csv
# Parameters
# ==================================================
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
data_loader = MultiClassDataLoader(tf.flags, WordDataProcessor())
data_loader.define_flags()
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
if FLAGS.eval_train:
x_raw, y_test = data_loader.load_data_and_labels()
y_test = np.argmax(y_test, axis=1)
else:
x_raw, y_test = data_loader.load_dev_data_and_labels()
y_test = np.argmax(y_test, axis=1)
# If checkpoint_dir is not given, fall back to the most recently modified run directory
if FLAGS.checkpoint_dir == "":
all_subdirs = ["./runs/" + d for d in os.listdir('./runs/.') if os.path.isdir("./runs/" + d)]
latest_subdir = max(all_subdirs, key=os.path.getmtime)
FLAGS.checkpoint_dir = latest_subdir + "/checkpoints/"
# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = data_loader.restore_vocab_processor(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
print("\nEvaluating...\n")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
# Print accuracy if y_test is defined
if y_test is not None:
correct_predictions = float(sum(all_predictions == y_test))
print("Total number of test examples: {}".format(len(y_test)))
print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
# Save the evaluation to a csv
class_predictions = data_loader.class_labels(all_predictions.astype(int))
predictions_human_readable = np.column_stack((np.array(x_raw), class_predictions))
out_path = os.path.join(FLAGS.checkpoint_dir, "../../../", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
csv.writer(f).writerows(predictions_human_readable) | 39.080808 | 102 | 0.715172 |
4a22af98f2631a2e22ece7de7d73d87c8c0c4400 | 283 | py | Python | givemedata/__init__.py | sancau/givemedata | cbdb92ef72cb02f01c13f5230162c719b2abf9da | [
"MIT"
] | 2 | 2019-05-17T10:01:25.000Z | 2020-06-01T08:04:06.000Z | givemedata/__init__.py | sancau/givemedata | cbdb92ef72cb02f01c13f5230162c719b2abf9da | [
"MIT"
] | 5 | 2020-03-24T16:57:34.000Z | 2021-09-13T13:23:49.000Z | givemedata/__init__.py | sancau/givemedata | cbdb92ef72cb02f01c13f5230162c719b2abf9da | [
"MIT"
] | 1 | 2019-05-17T10:02:23.000Z | 2019-05-17T10:02:23.000Z | from .givemedata import Data
from .givemedata import get_provider_from_config
from .givemedata import init_provider
from .version import VERSION as __version__
from . import utils
__all__ = [
    "Data",
    "get_provider_from_config",
    "init_provider",
    "utils",
    "__version__",
]
| 18.866667 | 48 | 0.766784 |
4a22b0683cd0e415595b45751213c88f0e9b06a6 | 1,892 | py | Python | sgmcmcjax/models/logistic_regression.py | ColCarroll/SGMCMCJax | de1dbf234577fa46ecc98c7c7de4ef547cef52ea | [
"Apache-2.0"
] | null | null | null | sgmcmcjax/models/logistic_regression.py | ColCarroll/SGMCMCJax | de1dbf234577fa46ecc98c7c7de4ef547cef52ea | [
"Apache-2.0"
] | null | null | null | sgmcmcjax/models/logistic_regression.py | ColCarroll/SGMCMCJax | de1dbf234577fa46ecc98c7c7de4ef547cef52ea | [
"Apache-2.0"
] | null | null | null | import warnings
import jax.numpy as jnp
import numpy as np
from jax import grad, jit, random, value_and_grad, vmap
from jax.scipy.special import logsumexp
# ignore by GPU/TPU message (generated by jax module)
warnings.filterwarnings("ignore", message="No GPU/TPU found, falling back to CPU.")
def genCovMat(key, d, rho):
Sigma0 = np.diag(np.ones(d))
for i in range(1, d):
for j in range(0, i):
Sigma0[i, j] = (random.uniform(key) * 2 * rho - rho) ** (i - j)
Sigma0[j, i] = Sigma0[i, j]
return jnp.array(Sigma0)
def logistic(theta, x):
return 1 / (1 + jnp.exp(-jnp.dot(theta, x)))
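# Batched helpers: batch_logistic applies the logistic function to a batch of
# inputs for a single theta (vmap over x, jit-compiled); batch_benoulli draws
# one Bernoulli sample per (key, probability) pair.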
batch_logistic = jit(vmap(logistic, in_axes=(None, 0)))
batch_benoulli = vmap(random.bernoulli, in_axes=(0, 0))
def gen_data(key, dim, N):
"""
Generate data with dimension `dim` and `N` data points
Parameters
----------
key: uint32
random key
dim: int
dimension of data
N: int
Size of dataset
Returns
-------
theta_true: ndarray
Theta array used to generate data
X: ndarray
Input data, shape=(N,dim)
y_data: ndarray
Output data: 0 or 1s. shape=(N,)
"""
key, subkey1, subkey2, subkey3 = random.split(key, 4)
rho = 0.4
print(f"generating data, with N={N} and dim={dim}")
theta_true = random.normal(subkey1, shape=(dim,)) * jnp.sqrt(10)
covX = genCovMat(subkey2, dim, rho)
X = jnp.dot(random.normal(subkey3, shape=(N, dim)), jnp.linalg.cholesky(covX))
p_array = batch_logistic(theta_true, X)
keys = random.split(key, N)
y_data = batch_benoulli(keys, p_array).astype(jnp.int32)
return theta_true, X, y_data
@jit
def loglikelihood(theta, x_val, y_val):
return -logsumexp(jnp.array([0.0, (1.0 - 2.0 * y_val) * jnp.dot(theta, x_val)]))
@jit
def logprior(theta):
return -(0.5 / 10) * jnp.dot(theta, theta)
| 25.917808 | 84 | 0.625264 |
4a22b1f84750a6bb8208b421dc2634ce7ff20827 | 3,451 | py | Python | SimVascular-master/Externals/Make/2019.06/BuildHelpers/Jupyter/site-packages/simvascular_tcl_kernel/simvascular_tcl_proxy.py | mccsssk2/SimVascularPM3_March2020 | 3cce6cc7be66545bea5dc3915a2db50a3892bf04 | [
"BSD-3-Clause"
] | null | null | null | SimVascular-master/Externals/Make/2019.06/BuildHelpers/Jupyter/site-packages/simvascular_tcl_kernel/simvascular_tcl_proxy.py | mccsssk2/SimVascularPM3_March2020 | 3cce6cc7be66545bea5dc3915a2db50a3892bf04 | [
"BSD-3-Clause"
] | null | null | null | SimVascular-master/Externals/Make/2019.06/BuildHelpers/Jupyter/site-packages/simvascular_tcl_kernel/simvascular_tcl_proxy.py | mccsssk2/SimVascularPM3_March2020 | 3cce6cc7be66545bea5dc3915a2db50a3892bf04 | [
"BSD-3-Clause"
] | null | null | null | import threading
try:
import queue
except ImportError:
import Queue as queue
from threading import Timer
from time import sleep
class ReplReader(threading.Thread):
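    # Daemon thread that keeps reading from the wrapped repl and pushes each
    # chunk onto a queue; a None result signals that the repl has terminated.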
def __init__(self, repl):
super(ReplReader, self).__init__()
self.repl = repl
self.daemon = True
self.queue = queue.Queue()
self.start()
def run(self):
r = self.repl
q = self.queue
while True:
result = r.read()
q.put(result)
if result is None:
break
class ReplProxy(object):
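    # Drives the Tcl repl: it configures tcl_prompt1 to print '^' so the
    # prompt character can serve as an end-of-output marker, strips the
    # echoed input prefix from the output, and polls the reader queue on a
    # timer.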
def __init__(self, repl):
self._repl = repl
self._repl_reader = ReplReader(repl)
        # this is a hack to detect when we stop processing this input
self.send_input('set tcl_interactive 1;set tcl_prompt1 {puts -nonewline ^}')
self.stop_flag = False
self.output = ''
self.timer = Timer(0.1, self.update_view_loop)
self.timer.start()
# get preambula and eveluation of the prompt
self.get_output()
self.output_prefix_stripped = True
self.expected_output_prefix = ''
self.expected_output_len = 0
def get_output(self):
while not self.stop_flag:
sleep(0.05)
out = self.output
self.output = ''
self.stop_flag = False
return out
def send_input(self, input):
# TODO: we should block here until we return output for previous command, should we?
# for multiline statements we should send 1 extra new line
# https://stackoverflow.com/questions/13229066/how-to-end-a-multi-line-command-in-powershell
if '\n' in input:
input += '\n'
self.expected_output_prefix = input.replace('\n', '\n>> ') + '\n'
self.expected_output_len = len(self.expected_output_prefix)
self.output_prefix_stripped = False
self._repl.write(input + '\n')
def handle_repl_output(self):
"""Returns new data from Repl and bool indicating if Repl is still
working"""
if self.stop_flag:
return True
try:
while True:
packet = self._repl_reader.queue.get_nowait()
if packet is None:
return False
self.write(packet)
except queue.Empty:
return True
def update_view_loop(self):
is_still_working = self.handle_repl_output()
if is_still_working:
self.timer = Timer(0.1, self.update_view_loop)
self.timer.start()
else:
self.write("\n***Repl Killed***\n""")
def write(self, packet):
#self.output += packet
#if packet == '%':
#self.stop_flag = True
#return
# this is a hack to detect when we stop processing this input
if packet == '^':
self.stop_flag = True
return
self.output += packet
if not self.output_prefix_stripped and len(self.output) >= self.expected_output_len:
if self.output[:self.expected_output_len] != self.expected_output_prefix:
print("Unexpected prefix: %r : Expected %r" % (
self.output[:self.expected_output_len], self.expected_output_prefix
))
else:
self.output_prefix_stripped = True
self.output = self.output[self.expected_output_len:]
| 31.372727 | 100 | 0.583599 |
4a22b248b277c06e7a3107ea2378a9e2f0ce5376 | 366 | py | Python | codes/models/__init__.py | IceClear/MW-GAN | acb962468c984681c4a21f7b5c14588ca8f58c00 | [
"MIT"
] | 36 | 2020-08-12T05:17:42.000Z | 2022-03-22T03:02:21.000Z | codes/models/__init__.py | IceClear/MW-GAN | acb962468c984681c4a21f7b5c14588ca8f58c00 | [
"MIT"
] | 15 | 2021-01-19T08:24:58.000Z | 2021-11-16T15:52:58.000Z | codes/models/__init__.py | RyanXingQL/MW-GAN | 562199344e322919a108048acd55b0dd8820df55 | [
"MIT"
] | 8 | 2020-10-23T14:15:15.000Z | 2021-12-23T02:18:23.000Z | import logging
logger = logging.getLogger('base')
def create_model(opt):
model = opt['model']
if model == 'mwgan':
from .MWGAN_model import MWGANModel as M
else:
raise NotImplementedError('Model [{:s}] not recognized.'.format(model))
m = M(opt)
logger.info('Model [{:s}] is created.'.format(m.__class__.__name__))
return m
| 24.4 | 79 | 0.642077 |
4a22b381cc5d052fb2bda0408fdf39dfda971da4 | 788 | py | Python | examples/dashboard.py | fusioncharts/fusionexport-python-client | 03e5114b79414a81c2f8712c55cc0c3709479922 | [
"MIT"
] | 1 | 2018-05-24T09:06:23.000Z | 2018-05-24T09:06:23.000Z | examples/dashboard.py | fusioncharts/fusionexport-python-client | 03e5114b79414a81c2f8712c55cc0c3709479922 | [
"MIT"
] | null | null | null | examples/dashboard.py | fusioncharts/fusionexport-python-client | 03e5114b79414a81c2f8712c55cc0c3709479922 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from fusionexport import ExportManager, ExportConfig # Import sdk
# Instantiate the ExportConfig class and add the required configurations
export_config = ExportConfig()
export_config["chartConfig"] = "resources/multiple.json"
export_config["templateFilePath"] = "resources/template.html"
export_config["type"] = "png"
export_config["templateFormat"] = "A1"
export_config["templateHeight"] = 600
# Provide port and host of FusionExport Service
export_server_host = "127.0.0.1"
export_server_port = 1337
# Instantiate the ExportManager class
em = ExportManager(export_server_host, export_server_port)
# Call the export() method with the export config and the output location
exported_files = em.export(export_config, "./exports", True)
#print(exported_files) | 31.52 | 73 | 0.790609 |
4a22b3e300a6bda689f8300d8475fea172829a89 | 3,760 | py | Python | huaweicloud-sdk-rms/huaweicloudsdkrms/v1/model/list_policy_states_by_assignment_id_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-rms/huaweicloudsdkrms/v1/model/list_policy_states_by_assignment_id_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-rms/huaweicloudsdkrms/v1/model/list_policy_states_by_assignment_id_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListPolicyStatesByAssignmentIdResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'value': 'list[PolicyState]',
'page_info': 'PageInfo'
}
attribute_map = {
'value': 'value',
'page_info': 'page_info'
}
def __init__(self, value=None, page_info=None):
"""ListPolicyStatesByAssignmentIdResponse - a model defined in huaweicloud sdk"""
super(ListPolicyStatesByAssignmentIdResponse, self).__init__()
self._value = None
self._page_info = None
self.discriminator = None
if value is not None:
self.value = value
if page_info is not None:
self.page_info = page_info
@property
def value(self):
"""Gets the value of this ListPolicyStatesByAssignmentIdResponse.
        Return value of the compliance result query
:return: The value of this ListPolicyStatesByAssignmentIdResponse.
:rtype: list[PolicyState]
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ListPolicyStatesByAssignmentIdResponse.
        Return value of the compliance result query
:param value: The value of this ListPolicyStatesByAssignmentIdResponse.
:type: list[PolicyState]
"""
self._value = value
@property
def page_info(self):
"""Gets the page_info of this ListPolicyStatesByAssignmentIdResponse.
:return: The page_info of this ListPolicyStatesByAssignmentIdResponse.
:rtype: PageInfo
"""
return self._page_info
@page_info.setter
def page_info(self, page_info):
"""Sets the page_info of this ListPolicyStatesByAssignmentIdResponse.
:param page_info: The page_info of this ListPolicyStatesByAssignmentIdResponse.
:type: PageInfo
"""
self._page_info = page_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListPolicyStatesByAssignmentIdResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.647059 | 89 | 0.583511 |
4a22b41ec5d6d147a7ee6042117c262005bcdb07 | 1,172 | py | Python | cli/bandetl/__main__.py | blockchain-etl/band-protocol-etl | 84a0819a89fdcbce75efc9f59e0c1286954397c2 | [
"MIT"
] | 5 | 2020-08-27T14:27:10.000Z | 2021-08-17T14:15:29.000Z | cli/bandetl/__main__.py | blockchain-etl/band-protocol-etl | 84a0819a89fdcbce75efc9f59e0c1286954397c2 | [
"MIT"
] | 4 | 2020-09-09T16:48:46.000Z | 2020-09-22T12:14:50.000Z | cli/bandetl/__main__.py | blockchain-etl/band-etl | 84a0819a89fdcbce75efc9f59e0c1286954397c2 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Evgeny Medvedev, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bandetl.cli import cli
cli()
| 43.407407 | 80 | 0.776451 |
4a22b44912aa4ac2e089dbafca88e788d50340f2 | 3,788 | py | Python | src/compas_igs/ui/Rhino/IGS/dev/IGS_edge_information_cmd.py | BlockResearchGroup/compas-IGS | b40698466b91c867600b94ae2530b19d336ad1b0 | [
"MIT"
] | 1 | 2021-11-03T23:22:37.000Z | 2021-11-03T23:22:37.000Z | src/compas_igs/ui/Rhino/IGS/dev/IGS_edge_information_cmd.py | BlockResearchGroup/compas-IGS | b40698466b91c867600b94ae2530b19d336ad1b0 | [
"MIT"
] | 1 | 2021-11-10T03:27:58.000Z | 2021-11-17T13:51:17.000Z | src/compas_igs/ui/Rhino/IGS/dev/IGS_edge_information_cmd.py | BlockResearchGroup/compas-IGS | b40698466b91c867600b94ae2530b19d336ad1b0 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
import scriptcontext as sc
find_object = sc.doc.Objects.Find
__commandname__ = "IGS_edge_information"
def RunCommand(is_interactive):
if 'IGS' not in sc.sticky:
compas_rhino.display_message('IGS has not been initialised yet.')
return
scene = sc.sticky['IGS']['scene']
objects = scene.find_by_name('Form')
if not objects:
compas_rhino.display_message("There is no FormDiagram in the scene.")
return
form = objects[0]
objects = scene.find_by_name('Force')
if not objects:
compas_rhino.display_message("There is no ForceDiagram in the scene.")
return
force = objects[0]
scale = force.scale
form_settings = form.settings.copy()
force_settings = force.settings.copy()
form.settings['show.edges'] = True
form.settings['show.forcelabels'] = False
form.settings['show.edgelabels'] = False
form.settings['show.forcepipes'] = False
force.settings['show.edges'] = True
force.settings['show.forcelabels'] = False
force.settings['show.edgelabels'] = False
force.settings['show.constraints'] = False
scene.update()
curvefilter = compas_rhino.rs.filter.curve
edge_index = form.diagram.edge_index()
while True:
guid = compas_rhino.rs.GetObject(message="Select an edge in Form or Force Diagrams", preselect=True, select=True, filter=curvefilter)
if not guid:
break
elif guid not in form.guid_edge and guid not in force.guid_edge:
compas_rhino.display_message("Edge does not belog to form or force diagram.")
break
if guid in form.guid_edge:
edge_form = form.guid_edge[guid]
index = edge_index[edge_form]
edge_force = list(force.diagram.ordered_edges(form.diagram))[index]
if guid in force.guid_edge:
edge_force = force.guid_edge[guid]
edge_form = force.diagram.dual_edge(edge_force)
index = edge_index[edge_form]
f = form.diagram.edge_attribute(edge_form, 'f')
l = abs(f * scale) # noqa E741
tol = form.settings['tol.forces']
state = ''
if not form.diagram.edge_attribute(edge_form, 'is_external'):
if f > + tol:
state = 'in tension'
elif f < - tol:
state = 'in compression'
key2guid = {form.guid_edge[guid]: guid for guid in form.guid_edge}
key2guid.update({(v, u): key2guid[(u, v)] for u, v in key2guid})
find_object(key2guid[edge_form]).Select(True)
key2guid = {force.guid_edge[guid]: guid for guid in force.guid_edge}
key2guid.update({(v, u): key2guid[(u, v)] for u, v in key2guid})
if abs(f) > tol:
find_object(key2guid[edge_force]).Select(True)
form.draw_highlight_edge(edge_form)
force.draw_highlight_edge(edge_force)
compas_rhino.display_message(
"Edge Index: {0}\nForce Diagram Edge Length: {1:.3g}\nForce Drawing Scale: {2:.3g}\nForce Magnitude: {3:.3g}kN {4}".format(index, l, scale, abs(f), state))
answer = compas_rhino.rs.GetString("Continue selecting edges?", "No", ["Yes", "No"])
if not answer:
break
if answer == "No":
break
if answer == 'Yes':
scene.update()
form.settings = form_settings
force.settings = force_settings
scene.update()
scene.save()
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
RunCommand(True)
| 32.655172 | 167 | 0.610348 |
4a22b492f5d9031c6b91cef780b610dcbc296f05 | 1,933 | py | Python | signup/urls/views/__init__.py | djaodjin/djaodjin-signup | 24d4b3f4cf139df87aa8c1586763744434346edd | [
"BSD-2-Clause"
] | 14 | 2015-05-20T02:23:57.000Z | 2022-01-06T06:37:20.000Z | signup/urls/views/__init__.py | djaodjin/djaodjin-signup | 24d4b3f4cf139df87aa8c1586763744434346edd | [
"BSD-2-Clause"
] | 33 | 2016-02-05T15:11:35.000Z | 2022-02-21T09:02:06.000Z | signup/urls/views/__init__.py | djaodjin/djaodjin-signup | 24d4b3f4cf139df87aa8c1586763744434346edd | [
"BSD-2-Clause"
] | 7 | 2015-06-24T11:27:52.000Z | 2021-07-15T07:12:35.000Z | # Copyright (c) 2019, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
URLconf for frictionless signup.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration:
(r'^accounts/', include('signup.urls.views.accounts')),
Optionally add URLs for User profiles:
(r'^users/', include('signup.urls.views.users')),
"""
from django.conf.urls import include, url
urlpatterns = [
url(r'^contacts/', include('signup.urls.views.contacts')),
url(r'^users/', include('signup.urls.views.users')),
url(r'^', include('signup.urls.views.accounts')),
]
| 42.021739 | 78 | 0.758924 |
4a22b5eadcb6275a54207f40bd53dfe025d98ba2 | 2,907 | py | Python | proyectos/models.py | luisfarfan/django_intranetapp | bb7e0c08706d8fdf9b1eae437baa1f5850dbd0bd | [
"Apache-2.0"
] | null | null | null | proyectos/models.py | luisfarfan/django_intranetapp | bb7e0c08706d8fdf9b1eae437baa1f5850dbd0bd | [
"Apache-2.0"
] | null | null | null | proyectos/models.py | luisfarfan/django_intranetapp | bb7e0c08706d8fdf9b1eae437baa1f5850dbd0bd | [
"Apache-2.0"
] | null | null | null | from django.db import models
from sistemas.models import Sistema
from django.contrib import admin
class Proyecto(models.Model):
id_siga = models.IntegerField()
nombre = models.CharField(max_length=100)
sigla = models.CharField(max_length=50, null=True, blank=True)
anio = models.IntegerField()
descripcion = models.TextField(blank=True, null=True)
fecha_inicio = models.DateField(blank=True, null=True)
fecha_fin = models.DateField(blank=True, null=True)
cod_meta = models.CharField(max_length=8)
estado = models.IntegerField(default=1)
usr_creacion = models.CharField(max_length=100, blank=True, null=True)
fec_creacion = models.DateTimeField(blank=True, null=True)
usr_edicion = models.CharField(max_length=100, blank=True, null=True)
fec_edicion = models.DateTimeField(blank=True, null=True)
sistemas = models.ManyToManyField(Sistema, through='ProyectoSistema')
def __unicode__(self):
return '%s , %s' % (self.sigla, self.nombre)
class Meta:
managed = True
db_table = 'PROYECTO'
unique_together = (('id_siga',))
@admin.register(Proyecto)
class ProyectoAdmin(admin.ModelAdmin):
list_display = ('sigla', 'nombre')
class ProyectoSistema(models.Model):
proyectos = models.ForeignKey('Proyecto')
sistemas = models.ForeignKey(Sistema)
usr_creacion = models.CharField(max_length=100, blank=True, null=True)
fec_creacion = models.DateTimeField(blank=True, null=True)
usr_edicion = models.CharField(max_length=100, blank=True, null=True)
fec_edicion = models.DateTimeField(blank=True, null=True)
def __str__(self):
return '%s , %s' % (self.proyectos, self.sistemas)
class Meta:
managed = True
db_table = 'PROYECTO_SISTEMA'
unique_together = (('proyectos', 'sistemas'))
@admin.register(ProyectoSistema)
class ProyectoSistemaAdmin(admin.ModelAdmin):
list_display = ('proyectos', 'sistemas')
class ProyectosSiga(models.Model):
id = models.IntegerField(primary_key=True, db_column='id')
annio_meta = models.CharField(db_column='annio_meta', max_length=4, blank=True, null=True)
codi_meta = models.CharField(db_column='codi_meta', max_length=4, blank=True, null=True)
cod_proyecto = models.CharField(db_column='cod_proyecto', max_length=4, blank=True, null=True)
desc_proyecto = models.CharField(db_column='desc_proyecto', max_length=255, blank=True, null=True)
CODI_DEPE_TDE = models.CharField(db_column='CODI_DEPE_TDE', max_length=4, blank=True, null=True)
codi_depe_apro = models.CharField(db_column='codi_depe_apro', max_length=4, blank=True, null=True)
sigla = models.CharField(db_column='sigla', max_length=50, blank=True, null=True)
def __unicode__(self):
return '%s , %s' % (self.codi_meta, self.desc_proyecto)
class Meta:
managed = False
db_table = 'V_PROYECTOS_SIGA'
| 39.283784 | 102 | 0.718266 |
4a22b78013cbb509ef1cb2a708c6e4c54575197d | 20,043 | py | Python | astropy/utils/compat/futures/_base.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | 3 | 2018-03-20T15:09:16.000Z | 2021-05-27T11:17:33.000Z | astropy/utils/compat/futures/_base.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | null | null | null | astropy/utils/compat/futures/_base.py | REMeyer/astropy | 28c49fb618538a01812e586cd07bccdf0591a6c6 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import logging
import threading
import time
from collections import namedtuple
from ....extern.six.moves import zip
__author__ = 'Brian Quinlan ([email protected])'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
STDERR_HANDLER = logging.StreamHandler()
LOGGER.addHandler(STDERR_HANDLER)
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that `wait` and `as_completed` block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by `as_completed`."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by `wait(return_when=FIRST_COMPLETED)`."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by `wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)`."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
if self.num_pending_calls == len(self.finished_futures):
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of `Future`
conditions.
"""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count,
stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count,
stop_on_exception=False)
else:
raise ValueError("Invalid return condition: "
"{!r}".format(return_when))
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Parameters
----------
fs
The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout
The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns
-------
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises
------
TimeoutError
If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'{} (of {}) futures unfinished'.format(
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Parameters
----------
fs
The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout
The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when
Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns
-------
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
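# A minimal usage sketch for `wait` and `as_completed` (illustrative only; it
# assumes a concrete Executor such as the ThreadPoolExecutor shipped alongside
# this module, which is not imported here):
#
#     with ThreadPoolExecutor(max_workers=4) as executor:
#         futures = [executor.submit(pow, 2, n) for n in range(8)]
#         done, not_done = wait(futures, timeout=10, return_when=ALL_COMPLETED)
#         for future in as_completed(futures):
#             print(future.result())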
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at {} state={} raised {}>'.format(
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at {} state={} returned {}>'.format(
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at {} state={}>'.format(
hex(id(self)), _STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Parameters
----------
fn
A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if (self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED,
FINISHED]):
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Parameters
----------
timeout
The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns
-------
The result of the call that the future represents.
Raises
------
CancelledError
If the future was cancelled.
TimeoutError
If the future didn't finish executing before the given
timeout.
Exception
If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Parameters
----------
timeout
The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns
-------
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises
------
CancelledError
If the future was cancelled.
TimeoutError
If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by `Executor` implementations and unit tests.
If the future has been cancelled (`cancel` was called and returned
        True) then any threads waiting on the future completing (through calls
        to `as_completed` or `wait`) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to `running` will return True) and True is returned.
This method should be called by `Executor` implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns
-------
False if the Future was cancelled, True otherwise.
Raises
------
RuntimeError
if this method was already called or if `set_result`
or `set_exception` was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by `Executor` implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by `Executor` implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as `fn(*args, **kwargs)` and
returns a `Future` instance representing the execution of the callable.
Returns
-------
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to `map(fn, iter)`.
Parameters
----------
fn
            A callable that will take as many arguments as there are
            passed iterables.
timeout
The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns
-------
        An iterator equivalent to `map(fn, *iterables)`, but the calls may
        be evaluated out-of-order.
Raises
------
TimeoutError
If the entire result iterator could not be generated
before the given timeout.
Exception
If `fn(*args)` raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the `Executor`.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Parameters
----------
wait
If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
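# Sketch of the context-manager behaviour defined above (illustrative only;
# `MyExecutor` is a hypothetical concrete subclass implementing `submit`):
#
#     with MyExecutor() as executor:          # __enter__ returns the executor
#         future = executor.submit(sum, [1, 2, 3])
#         future.add_done_callback(lambda f: print(f.result()))
#     # __exit__ calls shutdown(wait=True) when leaving the block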
| 31.317188 | 79 | 0.601257 |
4a22b78eb15a4ec30f80e107c4b0b1e167a5dfaf | 9,977 | py | Python | examples/text/babi_rnn.py | SoucheLab/TensorFlow2-Examples | c9c6f7b5085795a599de7d9aaf96162981f5d92f | [
"MIT"
] | 5 | 2019-10-25T06:17:42.000Z | 2020-02-13T19:22:01.000Z | examples/text/babi_rnn.py | SoucheLab/TensorFlow2-Examples | c9c6f7b5085795a599de7d9aaf96162981f5d92f | [
"MIT"
] | null | null | null | examples/text/babi_rnn.py | SoucheLab/TensorFlow2-Examples | c9c6f7b5085795a599de7d9aaf96162981f5d92f | [
"MIT"
] | null | null | null | # coding=utf-8
'''
Trains two recurrent neural networks based upon a story and a question.
'''
import tarfile
import re
from functools import reduce
import numpy as np
import tensorflow as tf
from argparse import Namespace
# Select the GPU(s) to use and configure them
PHYSICAL_DEVICES = tf.config.experimental.list_physical_devices('GPU')
if PHYSICAL_DEVICES:
USED_GPUS = PHYSICAL_DEVICES[2:3]
tf.config.experimental.set_visible_devices(devices=USED_GPUS, device_type='GPU')
for tmp_gpu in USED_GPUS:
tf.config.experimental.set_memory_growth(device=tmp_gpu, enable=True)
ARGS = Namespace(
# Training parameters.
embed_hidden_size=50,
sent_hidden_size=100,
query_hidden_size=100,
batch_size=32,
epochs=20,
learning_rate=0.001,
model_path='./babi_rnn.h5',
)
class DataLoader(object):
def __init__(self):
self.train_data, self.val_data = self.gen_data()
def gen_data(self):
# Download data
path = tf.keras.utils.get_file('babi-tasks-v1-2.tar.gz',
origin='https://s3.amazonaws.com/text-datasets/'
'babi_tasks_1-20_v1-2.tar.gz')
# Default QA1 with 1000 samples
# challenge = 'tasks_1-20_v1-2/en/qa1_single-supporting-fact_{}.txt'
# QA1 with 10,000 samples
# challenge = 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt'
# QA2 with 1000 samples
challenge = 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt'
# QA2 with 10,000 samples
# challenge = 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt'
with tarfile.open(path) as tar:
train = self.get_stories(tar.extractfile(challenge.format('train')))
test = self.get_stories(tar.extractfile(challenge.format('test')))
vocab = set()
for story, q, answer in train + test:
vocab |= set(story + q + [answer])
vocab = sorted(vocab)
# Reserve 0 for masking via pad_sequences
self.vocab_size = len(vocab) + 1
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
self.story_maxlen = max(map(len, (x for x, _, _ in train + test)))
self.query_maxlen = max(map(len, (x for _, x, _ in train + test)))
x, xq, y = self.vectorize_stories(train, word_idx, self.story_maxlen, self.query_maxlen)
tx, txq, ty = self.vectorize_stories(test, word_idx, self.story_maxlen, self.query_maxlen)
return (x, xq, y), (tx, txq, ty)
def tokenize(self, sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(self, lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format
If only_supporting is true,
only the sentences that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = self.tokenize(q)
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = self.tokenize(line)
story.append(sent)
return data
def get_stories(self, f, only_supporting=False, max_length=None):
'''Given a file name, read the file, retrieve the stories,
and then convert the sentences into a single story.
If max_length is supplied,
any stories longer than max_length tokens will be discarded.
'''
data = self.parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data
if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(self, data, word_idx, story_maxlen, query_maxlen):
xs = []
xqs = []
ys = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
# let's not forget that index 0 is reserved
y = np.zeros(len(word_idx) + 1)
y[word_idx[answer]] = 1
xs.append(x)
xqs.append(xq)
ys.append(y)
return (tf.keras.preprocessing.sequence.pad_sequences(xs, maxlen=story_maxlen),
tf.keras.preprocessing.sequence.pad_sequences(xqs, maxlen=query_maxlen), np.array(ys))
class BabiRNN(object):
def __init__(self):
self.data_loader = DataLoader()
self.model = self.create_model()
def create_model(self):
# sentence input
inputs_0 = tf.keras.layers.Input(shape=(self.data_loader.story_maxlen,), dtype='int32')
x_0 = tf.keras.layers.Embedding(self.data_loader.vocab_size, ARGS.embed_hidden_size)(inputs_0)
x_0 = tf.keras.layers.LSTM(ARGS.sent_hidden_size)(x_0)
# question input
inputs_1 = tf.keras.layers.Input(shape=(self.data_loader.query_maxlen,), dtype='int32')
x_1 = tf.keras.layers.Embedding(self.data_loader.vocab_size, ARGS.embed_hidden_size)(inputs_1)
x_1 = tf.keras.layers.LSTM(ARGS.query_hidden_size)(x_1)
x = tf.keras.layers.concatenate([x_0, x_1])
outputs = tf.keras.layers.Dense(self.data_loader.vocab_size, activation='softmax')(x)
return tf.keras.models.Model([inputs_0, inputs_1], outputs)
def get_dataset(self, data, is_training=False, return_steps=False):
x_data, xq_data, y_data = data
tmp_dataset = tf.data.Dataset.from_tensor_slices(((x_data, xq_data), y_data))
if is_training:
tmp_dataset = tmp_dataset.shuffle(buffer_size=1024).batch(ARGS.batch_size)
else:
tmp_dataset = tmp_dataset.batch(ARGS.batch_size)
if return_steps:
if x_data.shape[0] % ARGS.batch_size == 0:
tmp_steps = x_data.shape[0] // ARGS.batch_size
else:
tmp_steps = x_data.shape[0] // ARGS.batch_size + 1
return tmp_dataset, tmp_steps
else:
return tmp_dataset
# custom training loop
def train(self):
# instantiate an optimizer to train the model.
optimizer = tf.keras.optimizers.Adam(learning_rate=ARGS.learning_rate)
# instantiate a loss function.
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
# prepare the metrics.
train_acc_metric = tf.keras.metrics.CategoricalAccuracy()
val_acc_metric = tf.keras.metrics.CategoricalAccuracy()
# prepare the training dataset.
train_dataset, train_steps = self.get_dataset(self.data_loader.train_data, is_training=True,
return_steps=True)
# Prepare the validation dataset.
val_dataset, val_steps = self.get_dataset(self.data_loader.val_data, return_steps=True)
# Iterate over epochs.
best_val_acc = 0.
for epoch in range(ARGS.epochs):
print('*********************')
print('Epoch {} training...'.format(epoch))
training_bar = tf.keras.utils.Progbar(train_steps, stateful_metrics=['loss', 'acc'])
# Iterate over the batches of the dataset.
for train_step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = self.model(x_batch_train)
loss_value = loss_fn(y_batch_train, logits)
grads = tape.gradient(loss_value, self.model.trainable_weights)
optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
# Update training metric.
train_acc_metric(y_batch_train, logits)
# Logging
training_bar.update(train_step + 1,
values=[('loss', float(loss_value)), ('acc', float(train_acc_metric.result()))])
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
validating_bar = tf.keras.utils.Progbar(val_steps, stateful_metrics=['val_acc'])
# Run a validation loop at the end of each epoch.
for val_step, (x_batch_val, y_batch_val) in enumerate(val_dataset):
val_logits = self.model(x_batch_val)
# Update val metrics
val_acc_metric(y_batch_val, val_logits)
# Logging
validating_bar.update(val_step + 1, values=[('val_acc', float(val_acc_metric.result()))])
val_acc = val_acc_metric.result()
            # Save the best model with the highest validation accuracy
if val_acc > best_val_acc:
print('model saving...')
# todo tf.saved_model.save
# normal
self.model.save_weights(ARGS.model_path)
# # new
# tf.saved_model.save(self.model, args.model_dir_path)
best_val_acc = val_acc
val_acc_metric.reset_states()
if __name__ == '__main__':
tmp_model = BabiRNN()
tmp_model.train()
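    # Illustrative follow-up sketch (assumption: training above has saved the
    # best weights to ARGS.model_path): restore the weights and run the model
    # on one validation batch.
    #
    #     tmp_model.model.load_weights(ARGS.model_path)
    #     val_dataset = tmp_model.get_dataset(tmp_model.data_loader.val_data)
    #     for (x_batch, xq_batch), y_batch in val_dataset.take(1):
    #         predictions = tmp_model.model((x_batch, xq_batch))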
| 42.097046 | 116 | 0.601784 |
4a22b7dbc07084ae25b46e5205452881fde9b892 | 1,536 | py | Python | i3-chrome-tab-dragging.py | moritzhoewer/i3-chrome-tab-dragging | c3abd0933e5c82106cd1cd44ed8d9f078a6f59a8 | [
"MIT"
] | 20 | 2020-03-29T16:59:54.000Z | 2022-03-14T11:05:09.000Z | i3-chrome-tab-dragging.py | moritzhoewer/i3-chrome-tab-dragging | c3abd0933e5c82106cd1cd44ed8d9f078a6f59a8 | [
"MIT"
] | 4 | 2020-03-29T20:28:25.000Z | 2022-01-22T14:28:15.000Z | i3-chrome-tab-dragging.py | moritzhoewer/i3-chrome-tab-dragging | c3abd0933e5c82106cd1cd44ed8d9f078a6f59a8 | [
"MIT"
] | 3 | 2020-03-29T16:56:33.000Z | 2020-05-14T10:46:00.000Z | #!/usr/bin/env python3
# vi:expandtab tabstop=4
# This is intended to be run when i3 starts it will exit on restart; make sure
# to use "exec_always --no-startup-id" to run this
from pynput.mouse import Listener, Button
from i3ipc import Connection, Event
# Constants
browser_classes = [
"Google-chrome",
"Chromium",
"Brave-browser",
]
# Global Variables
mousePressed = False
currentWindow = None
# Called by mouse listener when the mouse is clicked
def on_click(x, y, button, pressed):
global mousePressed
global currentWindow
# we want to store the status of the left mouse button
if button == Button.left:
mousePressed = pressed
# if the button is released and we were currently dragging a window, unfloat it
if not pressed and currentWindow:
currentWindow.command('floating disable')
currentWindow = None
# Called by i3 when a new window is created
def on_window_new(i3, e):
global currentWindow
# we only care about chromium windows
if e.container.window_class in browser_classes:
# only switch to floating mode if the user is currently dragging (=mouse button pressed)
if mousePressed:
e.container.command('floating enable')
# store the reference to the window, so we can unfloat it later
currentWindow = e.container
##############
# Main Logic #
##############
i3 = Connection()
i3.on(Event.WINDOW_NEW, on_window_new)
with Listener(on_click=on_click) as listener:
i3.main()
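# Illustrative i3 config entry to launch this script (the path is an
# assumption; adjust it to wherever the script lives):
#
#     exec_always --no-startup-id ~/.config/i3/i3-chrome-tab-dragging.py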
| 27.927273 | 96 | 0.686198 |
4a22bc0687d552e1b794261fcc3e402fa5a0e209 | 38,458 | py | Python | src/silx/gui/plot/tools/profile/manager.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 94 | 2016-03-04T17:25:53.000Z | 2022-03-18T18:05:23.000Z | src/silx/gui/plot/tools/profile/manager.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 2,841 | 2016-01-21T09:06:49.000Z | 2022-03-18T14:53:56.000Z | src/silx/gui/plot/tools/profile/manager.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 71 | 2015-09-30T08:35:35.000Z | 2022-03-16T07:16:28.000Z | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2018-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This module provides a manager to compute and display profiles.
"""
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "28/06/2018"
import logging
import weakref
from silx.gui import qt
from silx.gui import colors
from silx.gui import utils
from silx.utils.weakref import WeakMethodProxy
from silx.gui import icons
from silx.gui.plot import PlotWidget
from silx.gui.plot.tools.roi import RegionOfInterestManager
from silx.gui.plot.tools.roi import CreateRoiModeAction
from silx.gui.plot import items
from silx.gui.qt import silxGlobalThreadPool
from silx.gui.qt import inspect
from . import rois
from . import core
from . import editors
_logger = logging.getLogger(__name__)
class _RunnableComputeProfile(qt.QRunnable):
"""Runner to process profiles
    :param qt.QThreadPool threadPool: The thread pool which will be used to
        execute this runner. Its thread is also used to host the result signals
:param ~silx.gui.plot.items.Item item: Item in which the profile is
computed
:param ~silx.gui.plot.tools.profile.core.ProfileRoiMixIn roi: ROI
defining the profile shape and other characteristics
"""
class _Signals(qt.QObject):
"""Signal holder"""
resultReady = qt.Signal(object, object)
runnerFinished = qt.Signal(object)
def __init__(self, threadPool, item, roi):
"""Constructor
"""
super(_RunnableComputeProfile, self).__init__()
self._signals = self._Signals()
self._signals.moveToThread(threadPool.thread())
self._item = item
self._roi = roi
self._cancelled = False
def _lazyCancel(self):
"""Cancel the runner if it is not yet started.
The threadpool will still execute the runner, but this will process
nothing.
This is only used with Qt<5.9 where QThreadPool.tryTake is not available.
"""
self._cancelled = True
def autoDelete(self):
return False
def getRoi(self):
"""Returns the ROI in which the runner will compute a profile.
:rtype: ~silx.gui.plot.tools.profile.core.ProfileRoiMixIn
"""
return self._roi
@property
def resultReady(self):
"""Signal emitted when the result of the computation is available.
This signal provides 2 values: The ROI, and the computation result.
"""
return self._signals.resultReady
@property
def runnerFinished(self):
"""Signal emitted when runner have finished.
This signal provides a single value: the runner itself.
"""
return self._signals.runnerFinished
def run(self):
"""Process the profile computation.
"""
if not self._cancelled:
try:
profileData = self._roi.computeProfile(self._item)
except Exception:
_logger.error("Error while computing profile", exc_info=True)
else:
self.resultReady.emit(self._roi, profileData)
self.runnerFinished.emit(self)
class ProfileWindow(qt.QMainWindow):
"""
Display a computed profile.
The content can be described using :meth:`setRoiProfile` if the source of
the profile is a profile ROI, and :meth:`setProfile` for the data content.
"""
sigClose = qt.Signal()
"""Emitted by :meth:`closeEvent` (e.g. when the window is closed
through the window manager's close icon)."""
def __init__(self, parent=None, backend=None):
qt.QMainWindow.__init__(self, parent=parent, flags=qt.Qt.Dialog)
self.setWindowTitle('Profile window')
self._plot1D = None
self._plot2D = None
self._backend = backend
self._data = None
widget = qt.QWidget()
self._layout = qt.QStackedLayout(widget)
self._layout.setContentsMargins(0, 0, 0, 0)
self.setCentralWidget(widget)
def prepareWidget(self, roi):
"""Called before the show to prepare the window to use with
a specific ROI."""
if isinstance(roi, rois._DefaultImageStackProfileRoiMixIn):
profileType = roi.getProfileType()
else:
profileType = "1D"
if profileType == "1D":
self.getPlot1D()
elif profileType == "2D":
self.getPlot2D()
def createPlot1D(self, parent, backend):
"""Inherit this function to create your own plot to render 1D
profiles. The default value is a `Plot1D`.
:param parent: The parent of this widget or None.
:param backend: The backend to use for the plot.
See :class:`PlotWidget` for the list of supported backend.
:rtype: PlotWidget
"""
# import here to avoid circular import
from ...PlotWindow import Plot1D
plot = Plot1D(parent=parent, backend=backend)
plot.setDataMargins(yMinMargin=0.1, yMaxMargin=0.1)
plot.setGraphYLabel('Profile')
plot.setGraphXLabel('')
return plot
def createPlot2D(self, parent, backend):
"""Inherit this function to create your own plot to render 2D
profiles. The default value is a `Plot2D`.
:param parent: The parent of this widget or None.
:param backend: The backend to use for the plot.
See :class:`PlotWidget` for the list of supported backend.
:rtype: PlotWidget
"""
# import here to avoid circular import
from ...PlotWindow import Plot2D
return Plot2D(parent=parent, backend=backend)
def getPlot1D(self, init=True):
"""Return the current plot used to display curves and create it if it
does not yet exists and `init` is True. Else returns None."""
if not init:
return self._plot1D
if self._plot1D is None:
self._plot1D = self.createPlot1D(self, self._backend)
self._layout.addWidget(self._plot1D)
return self._plot1D
def _showPlot1D(self):
plot = self.getPlot1D()
self._layout.setCurrentWidget(plot)
def getPlot2D(self, init=True):
"""Return the current plot used to display image and create it if it
does not yet exists and `init` is True. Else returns None."""
if not init:
return self._plot2D
if self._plot2D is None:
self._plot2D = self.createPlot2D(parent=self, backend=self._backend)
self._layout.addWidget(self._plot2D)
return self._plot2D
def _showPlot2D(self):
plot = self.getPlot2D()
self._layout.setCurrentWidget(plot)
def getCurrentPlotWidget(self):
return self._layout.currentWidget()
def closeEvent(self, qCloseEvent):
self.sigClose.emit()
qCloseEvent.accept()
def setRoiProfile(self, roi):
"""Set the profile ROI which it the source of the following data
to display.
:param ProfileRoiMixIn roi: The profile ROI data source
"""
if roi is None:
return
self.__color = colors.rgba(roi.getColor())
def _setImageProfile(self, data):
"""
        Setup the window to display new profile data which is represented
by an image.
:param core.ImageProfileData data: Computed data profile
"""
plot = self.getPlot2D()
plot.clear()
plot.setGraphTitle(data.title)
plot.getXAxis().setLabel(data.xLabel)
coords = data.coords
colormap = data.colormap
profileScale = (coords[-1] - coords[0]) / data.profile.shape[1], 1
plot.addImage(data.profile,
legend="profile",
colormap=colormap,
origin=(coords[0], 0),
scale=profileScale)
plot.getYAxis().setLabel("Frame index (depth)")
self._showPlot2D()
def _setCurveProfile(self, data):
"""
        Setup the window to display new profile data which is represented
by a curve.
:param core.CurveProfileData data: Computed data profile
"""
plot = self.getPlot1D()
plot.clear()
plot.setGraphTitle(data.title)
plot.getXAxis().setLabel(data.xLabel)
plot.getYAxis().setLabel(data.yLabel)
plot.addCurve(data.coords,
data.profile,
legend="level",
color=self.__color)
self._showPlot1D()
def _setRgbaProfile(self, data):
"""
        Setup the window to display new profile data which is represented
by a curve.
:param core.RgbaProfileData data: Computed data profile
"""
plot = self.getPlot1D()
plot.clear()
plot.setGraphTitle(data.title)
plot.getXAxis().setLabel(data.xLabel)
plot.getYAxis().setLabel(data.yLabel)
self._showPlot1D()
plot.addCurve(data.coords, data.profile,
legend="level", color="black")
plot.addCurve(data.coords, data.profile_r,
legend="red", color="red")
plot.addCurve(data.coords, data.profile_g,
legend="green", color="green")
plot.addCurve(data.coords, data.profile_b,
legend="blue", color="blue")
if data.profile_a is not None:
plot.addCurve(data.coords, data.profile_a, legend="alpha", color="gray")
def clear(self):
"""Clear the window profile"""
plot = self.getPlot1D(init=False)
if plot is not None:
plot.clear()
plot = self.getPlot2D(init=False)
if plot is not None:
plot.clear()
def getProfile(self):
"""Returns the profile data which is displayed"""
return self.__data
def setProfile(self, data):
"""
        Setup the window to display new profile data.
        This method dispatches the result to a specific method according to the
data type.
:param data: Computed data profile
"""
self.__data = data
if data is None:
self.clear()
elif isinstance(data, core.ImageProfileData):
self._setImageProfile(data)
elif isinstance(data, core.RgbaProfileData):
self._setRgbaProfile(data)
elif isinstance(data, core.CurveProfileData):
self._setCurveProfile(data)
else:
raise TypeError("Unsupported type %s" % type(data))
class _ClearAction(qt.QAction):
"""Action to clear the profile manager
The action is only enabled if something can be cleaned up.
"""
def __init__(self, parent, profileManager):
super(_ClearAction, self).__init__(parent)
self.__profileManager = weakref.ref(profileManager)
icon = icons.getQIcon('profile-clear')
self.setIcon(icon)
self.setText('Clear profile')
self.setToolTip('Clear the profiles')
self.setCheckable(False)
self.setEnabled(False)
self.triggered.connect(profileManager.clearProfile)
plot = profileManager.getPlotWidget()
roiManager = profileManager.getRoiManager()
plot.sigInteractiveModeChanged.connect(self.__modeUpdated)
roiManager.sigRoiChanged.connect(self.__roiListUpdated)
def getProfileManager(self):
return self.__profileManager()
def __roiListUpdated(self):
self.__update()
def __modeUpdated(self, source):
self.__update()
def __update(self):
profileManager = self.getProfileManager()
if profileManager is None:
return
roiManager = profileManager.getRoiManager()
if roiManager is None:
return
enabled = roiManager.isStarted() or len(roiManager.getRois()) > 0
self.setEnabled(enabled)
class _StoreLastParamBehavior(qt.QObject):
"""This object allow to store and restore the properties of the ROI
profiles"""
def __init__(self, parent):
assert isinstance(parent, ProfileManager)
super(_StoreLastParamBehavior, self).__init__(parent=parent)
self.__properties = {}
self.__profileRoi = None
self.__filter = utils.LockReentrant()
def _roi(self):
"""Return the spied ROI"""
if self.__profileRoi is None:
return None
roi = self.__profileRoi()
if roi is None:
self.__profileRoi = None
return roi
def setProfileRoi(self, roi):
"""Set a profile ROI to spy.
:param ProfileRoiMixIn roi: A profile ROI
"""
previousRoi = self._roi()
if previousRoi is roi:
return
if previousRoi is not None:
previousRoi.sigProfilePropertyChanged.disconnect(self._profilePropertyChanged)
self.__profileRoi = None if roi is None else weakref.ref(roi)
if roi is not None:
roi.sigProfilePropertyChanged.connect(self._profilePropertyChanged)
def _profilePropertyChanged(self):
"""Handle changes on the properties defining the profile ROI.
"""
if self.__filter.locked():
return
roi = self.sender()
self.storeProperties(roi)
def storeProperties(self, roi):
if isinstance(roi, (rois._DefaultImageStackProfileRoiMixIn,
rois.ProfileImageStackCrossROI)):
self.__properties["method"] = roi.getProfileMethod()
self.__properties["line-width"] = roi.getProfileLineWidth()
self.__properties["type"] = roi.getProfileType()
elif isinstance(roi, (rois._DefaultImageProfileRoiMixIn,
rois.ProfileImageCrossROI)):
self.__properties["method"] = roi.getProfileMethod()
self.__properties["line-width"] = roi.getProfileLineWidth()
elif isinstance(roi, (rois._DefaultScatterProfileRoiMixIn,
rois.ProfileScatterCrossROI)):
self.__properties["npoints"] = roi.getNPoints()
def restoreProperties(self, roi):
with self.__filter:
if isinstance(roi, (rois._DefaultImageStackProfileRoiMixIn,
rois.ProfileImageStackCrossROI)):
value = self.__properties.get("method", None)
if value is not None:
roi.setProfileMethod(value)
value = self.__properties.get("line-width", None)
if value is not None:
roi.setProfileLineWidth(value)
value = self.__properties.get("type", None)
if value is not None:
roi.setProfileType(value)
elif isinstance(roi, (rois._DefaultImageProfileRoiMixIn,
rois.ProfileImageCrossROI)):
value = self.__properties.get("method", None)
if value is not None:
roi.setProfileMethod(value)
value = self.__properties.get("line-width", None)
if value is not None:
roi.setProfileLineWidth(value)
elif isinstance(roi, (rois._DefaultScatterProfileRoiMixIn,
rois.ProfileScatterCrossROI)):
value = self.__properties.get("npoints", None)
if value is not None:
roi.setNPoints(value)
class ProfileManager(qt.QObject):
"""Base class for profile management tools
:param plot: :class:`~silx.gui.plot.PlotWidget` on which to operate.
:param plot: :class:`~silx.gui.plot.tools.roi.RegionOfInterestManager`
on which to operate.
"""
def __init__(self, parent=None, plot=None, roiManager=None):
super(ProfileManager, self).__init__(parent)
assert isinstance(plot, PlotWidget)
self._plotRef = weakref.ref(
plot, WeakMethodProxy(self.__plotDestroyed))
# Set-up interaction manager
if roiManager is None:
roiManager = RegionOfInterestManager(plot)
self._roiManagerRef = weakref.ref(roiManager)
self._rois = []
self._pendingRunners = []
"""List of ROIs which have to be updated"""
self.__reentrantResults = {}
"""Store reentrant result to avoid to skip some of them
cause the implementation uses a QEventLoop."""
self._profileWindowClass = ProfileWindow
"""Class used to display the profile results"""
self._computedProfiles = 0
"""Statistics for tests"""
self.__itemTypes = []
"""Kind of items to use"""
self.__tracking = False
"""Is the plot active items are tracked"""
self.__useColorFromCursor = True
"""If true, force the ROI color with the colormap marker color"""
self._item = None
"""The selected item"""
self.__singleProfileAtATime = True
"""When it's true, only a single profile is displayed at a time."""
self._previousWindowGeometry = []
self._storeProperties = _StoreLastParamBehavior(self)
"""If defined the profile properties of the last ROI are reused to the
new created ones"""
# Listen to plot limits changed
plot.getXAxis().sigLimitsChanged.connect(self.requestUpdateAllProfile)
plot.getYAxis().sigLimitsChanged.connect(self.requestUpdateAllProfile)
roiManager.sigInteractiveModeFinished.connect(self.__interactionFinished)
roiManager.sigInteractiveRoiCreated.connect(self.__roiCreated)
roiManager.sigRoiAdded.connect(self.__roiAdded)
roiManager.sigRoiAboutToBeRemoved.connect(self.__roiRemoved)
def setSingleProfile(self, enable):
"""
Enable or disable the single profile mode.
In single mode, the manager enforce a single ROI at the same
time. A new one will remove the previous one.
If this mode is not enabled, many ROIs can be created, and many
profile windows will be displayed.
"""
self.__singleProfileAtATime = enable
def isSingleProfile(self):
"""
Returns true if the manager is in a single profile mode.
:rtype: bool
"""
return self.__singleProfileAtATime
def __interactionFinished(self):
"""Handle end of interactive mode"""
pass
def __roiAdded(self, roi):
"""Handle new ROI"""
# Filter out non profile ROIs
if not isinstance(roi, core.ProfileRoiMixIn):
return
self.__addProfile(roi)
def __roiRemoved(self, roi):
"""Handle removed ROI"""
# Filter out non profile ROIs
if not isinstance(roi, core.ProfileRoiMixIn):
return
self.__removeProfile(roi)
def createProfileAction(self, profileRoiClass, parent=None):
"""Create an action from a class of ProfileRoi
:param core.ProfileRoiMixIn profileRoiClass: A class of a profile ROI
:param qt.QObject parent: The parent of the created action.
:rtype: qt.QAction
"""
if not issubclass(profileRoiClass, core.ProfileRoiMixIn):
raise TypeError("Type %s not expected" % type(profileRoiClass))
roiManager = self.getRoiManager()
action = CreateRoiModeAction(parent, roiManager, profileRoiClass)
if hasattr(profileRoiClass, "ICON"):
action.setIcon(icons.getQIcon(profileRoiClass.ICON))
if hasattr(profileRoiClass, "NAME"):
def articulify(word):
"""Add an an/a article in the front of the word"""
first = word[1] if word[0] == 'h' else word[0]
if first in "aeiou":
return "an " + word
return "a " + word
action.setText('Define %s' % articulify(profileRoiClass.NAME))
action.setToolTip('Enables %s selection mode' % profileRoiClass.NAME)
action.setSingleShot(True)
return action
def createClearAction(self, parent):
"""Create an action to clean up the plot from the profile ROIs.
:param qt.QObject parent: The parent of the created action.
:rtype: qt.QAction
"""
action = _ClearAction(parent, self)
return action
def createImageActions(self, parent):
"""Create actions designed for image items. This actions created
new ROIs.
:param qt.QObject parent: The parent of the created action.
:rtype: List[qt.QAction]
"""
profileClasses = [
rois.ProfileImageHorizontalLineROI,
rois.ProfileImageVerticalLineROI,
rois.ProfileImageLineROI,
rois.ProfileImageDirectedLineROI,
rois.ProfileImageCrossROI,
]
return [self.createProfileAction(pc, parent=parent) for pc in profileClasses]
def createScatterActions(self, parent):
"""Create actions designed for scatter items. This actions created
new ROIs.
:param qt.QObject parent: The parent of the created action.
:rtype: List[qt.QAction]
"""
profileClasses = [
rois.ProfileScatterHorizontalLineROI,
rois.ProfileScatterVerticalLineROI,
rois.ProfileScatterLineROI,
rois.ProfileScatterCrossROI,
]
return [self.createProfileAction(pc, parent=parent) for pc in profileClasses]
def createScatterSliceActions(self, parent):
"""Create actions designed for regular scatter items. This actions
created new ROIs.
This ROIs was designed to use the input data without interpolation,
like you could do with an image.
:param qt.QObject parent: The parent of the created action.
:rtype: List[qt.QAction]
"""
profileClasses = [
rois.ProfileScatterHorizontalSliceROI,
rois.ProfileScatterVerticalSliceROI,
rois.ProfileScatterCrossSliceROI,
]
return [self.createProfileAction(pc, parent=parent) for pc in profileClasses]
def createImageStackActions(self, parent):
"""Create actions designed for stack image items. This actions
created new ROIs.
This ROIs was designed to create both profile on the displayed image
and profile on the full stack (2D result).
:param qt.QObject parent: The parent of the created action.
:rtype: List[qt.QAction]
"""
profileClasses = [
rois.ProfileImageStackHorizontalLineROI,
rois.ProfileImageStackVerticalLineROI,
rois.ProfileImageStackLineROI,
rois.ProfileImageStackCrossROI,
]
return [self.createProfileAction(pc, parent=parent) for pc in profileClasses]
def createEditorAction(self, parent):
"""Create an action containing GUI to edit the selected profile ROI.
:param qt.QObject parent: The parent of the created action.
:rtype: qt.QAction
"""
action = editors.ProfileRoiEditorAction(parent)
action.setRoiManager(self.getRoiManager())
return action
def setItemType(self, image=False, scatter=False):
"""Set the item type to use and select the active one.
:param bool image: Image item are allowed
:param bool scatter: Scatter item are allowed
"""
self.__itemTypes = []
plot = self.getPlotWidget()
item = None
if image:
self.__itemTypes.append("image")
item = plot.getActiveImage()
if scatter:
self.__itemTypes.append("scatter")
if item is None:
item = plot.getActiveScatter()
self.setPlotItem(item)
def setProfileWindowClass(self, profileWindowClass):
"""Set the class which will be instantiated to display profile result.
"""
self._profileWindowClass = profileWindowClass
def setActiveItemTracking(self, tracking):
"""Enable/disable the tracking of the active item of the plot.
:param bool tracking: Tracking mode
"""
if self.__tracking == tracking:
return
plot = self.getPlotWidget()
if self.__tracking:
            plot.sigActiveImageChanged.disconnect(self.__activeImageChanged)
            plot.sigActiveScatterChanged.disconnect(self.__activeScatterChanged)
self.__tracking = tracking
if self.__tracking:
plot.sigActiveImageChanged.connect(self.__activeImageChanged)
plot.sigActiveScatterChanged.connect(self.__activeScatterChanged)
def setDefaultColorFromCursorColor(self, enabled):
"""Enabled/disable the use of the colormap cursor color to display the
ROIs.
If set, the manager will update the color of the profile ROIs using the
current colormap cursor color from the selected item.
"""
self.__useColorFromCursor = enabled
def __activeImageChanged(self, previous, legend):
"""Handle plot item selection"""
if "image" in self.__itemTypes:
plot = self.getPlotWidget()
item = plot.getImage(legend)
self.setPlotItem(item)
def __activeScatterChanged(self, previous, legend):
"""Handle plot item selection"""
if "scatter" in self.__itemTypes:
plot = self.getPlotWidget()
item = plot.getScatter(legend)
self.setPlotItem(item)
def __roiCreated(self, roi):
"""Handle ROI creation"""
# Filter out non profile ROIs
if isinstance(roi, core.ProfileRoiMixIn):
if self._storeProperties is not None:
# Initialize the properties with the previous ones
self._storeProperties.restoreProperties(roi)
def __addProfile(self, profileRoi):
"""Add a new ROI to the manager."""
if profileRoi.getFocusProxy() is None:
if self._storeProperties is not None:
# Follow changes on properties
self._storeProperties.setProfileRoi(profileRoi)
if self.__singleProfileAtATime:
# FIXME: It would be good to reuse the windows to avoid blinking
self.clearProfile()
profileRoi._setProfileManager(self)
self._updateRoiColor(profileRoi)
self._rois.append(profileRoi)
self.requestUpdateProfile(profileRoi)
def __removeProfile(self, profileRoi):
"""Remove a ROI from the manager."""
window = self._disconnectProfileWindow(profileRoi)
if window is not None:
geometry = window.geometry()
if not geometry.isEmpty():
self._previousWindowGeometry.append(geometry)
self.clearProfileWindow(window)
if profileRoi in self._rois:
self._rois.remove(profileRoi)
def _disconnectProfileWindow(self, profileRoi):
"""Handle profile window close."""
window = profileRoi.getProfileWindow()
profileRoi.setProfileWindow(None)
return window
def clearProfile(self):
"""Clear the associated ROI profile"""
roiManager = self.getRoiManager()
for roi in list(self._rois):
if roi.getFocusProxy() is not None:
# Skip sub ROIs, it will be removed by their parents
continue
roiManager.removeRoi(roi)
if not roiManager.isDrawing():
# Clean the selected mode
roiManager.stop()
def hasPendingOperations(self):
"""Returns true if a thread is still computing or displaying a profile.
:rtype: bool
"""
return len(self.__reentrantResults) > 0 or len(self._pendingRunners) > 0
def requestUpdateAllProfile(self):
"""Request to update the profile of all the managed ROIs.
"""
for roi in self._rois:
self.requestUpdateProfile(roi)
def requestUpdateProfile(self, profileRoi):
"""Request to update a specific profile ROI.
:param ~core.ProfileRoiMixIn profileRoi:
"""
if profileRoi.computeProfile is None:
return
threadPool = silxGlobalThreadPool()
# Clean up deprecated runners
for runner in list(self._pendingRunners):
if not inspect.isValid(runner):
self._pendingRunners.remove(runner)
continue
if runner.getRoi() is profileRoi:
if hasattr(threadPool, "tryTake"):
if threadPool.tryTake(runner):
self._pendingRunners.remove(runner)
else: # Support Qt<5.9
runner._lazyCancel()
item = self.getPlotItem()
if item is None or not isinstance(item, profileRoi.ITEM_KIND):
# This item is not compatible with this profile
profileRoi._setPlotItem(None)
profileWindow = profileRoi.getProfileWindow()
if profileWindow is not None:
profileWindow.setProfile(None)
return
profileRoi._setPlotItem(item)
runner = _RunnableComputeProfile(threadPool, item, profileRoi)
runner.runnerFinished.connect(self.__cleanUpRunner)
runner.resultReady.connect(self.__displayResult)
self._pendingRunners.append(runner)
threadPool.start(runner)
def __cleanUpRunner(self, runner):
"""Remove a thread pool runner from the list of hold tasks.
Called at the termination of the runner.
"""
if runner in self._pendingRunners:
self._pendingRunners.remove(runner)
def __displayResult(self, roi, profileData):
"""Display the result of a ROI.
:param ~core.ProfileRoiMixIn profileRoi: A managed ROI
:param ~core.CurveProfileData profileData: Computed data profile
"""
if roi in self.__reentrantResults:
# Store the data to process it in the main loop
            # and not in a sub loop created by initProfileWindow.
            # This also removes duplicated requests.
self.__reentrantResults[roi] = profileData
return
self.__reentrantResults[roi] = profileData
self._computedProfiles = self._computedProfiles + 1
window = roi.getProfileWindow()
if window is None:
plot = self.getPlotWidget()
window = self.createProfileWindow(plot, roi)
# roi.profileWindow have to be set before initializing the window
# Cause the initialization is using QEventLoop
roi.setProfileWindow(window)
self.initProfileWindow(window, roi)
window.show()
lastData = self.__reentrantResults.pop(roi)
window.setProfile(lastData)
def __plotDestroyed(self, ref):
"""Handle finalization of PlotWidget
:param ref: weakref to the plot
"""
self._plotRef = None
self._roiManagerRef = None
self._pendingRunners = []
def setPlotItem(self, item):
"""Set the plot item focused by the profile manager.
:param ~silx.gui.plot.items.Item item: A plot item
"""
previous = self.getPlotItem()
if previous is item:
return
if item is None:
self._item = None
else:
item.sigItemChanged.connect(self.__itemChanged)
self._item = weakref.ref(item)
self._updateRoiColors()
self.requestUpdateAllProfile()
def getDefaultColor(self, item):
"""Returns the default ROI color to use according to the given item.
        :param ~silx.gui.plot.items.item.Item item: An item
:rtype: qt.QColor
"""
color = 'pink'
if isinstance(item, items.ColormapMixIn):
colormap = item.getColormap()
name = colormap.getName()
if name is not None:
color = colors.cursorColorForColormap(name)
color = colors.asQColor(color)
return color
def _updateRoiColors(self):
"""Update ROI color according to the item selection"""
if not self.__useColorFromCursor:
return
item = self.getPlotItem()
color = self.getDefaultColor(item)
for roi in self._rois:
roi.setColor(color)
def _updateRoiColor(self, roi):
"""Update a specific ROI according to the current selected item.
:param RegionOfInterest roi: The ROI to update
"""
if not self.__useColorFromCursor:
return
item = self.getPlotItem()
color = self.getDefaultColor(item)
roi.setColor(color)
def __itemChanged(self, changeType):
"""Handle item changes.
"""
if changeType in (items.ItemChangedType.DATA,
items.ItemChangedType.MASK,
items.ItemChangedType.POSITION,
items.ItemChangedType.SCALE):
self.requestUpdateAllProfile()
elif changeType == (items.ItemChangedType.COLORMAP):
self._updateRoiColors()
def getPlotItem(self):
"""Returns the item focused by the profile manager.
:rtype: ~silx.gui.plot.items.Item
"""
if self._item is None:
return None
item = self._item()
if item is None:
self._item = None
return item
def getPlotWidget(self):
"""The plot associated to the profile manager.
:rtype: ~silx.gui.plot.PlotWidget
"""
if self._plotRef is None:
return None
plot = self._plotRef()
if plot is None:
self._plotRef = None
return plot
def getCurrentRoi(self):
"""Returns the currently selected ROI, else None.
:rtype: core.ProfileRoiMixIn
"""
roiManager = self.getRoiManager()
if roiManager is None:
return None
roi = roiManager.getCurrentRoi()
if not isinstance(roi, core.ProfileRoiMixIn):
return None
return roi
def getRoiManager(self):
"""Returns the used ROI manager
:rtype: RegionOfInterestManager
"""
return self._roiManagerRef()
def createProfileWindow(self, plot, roi):
"""Create a new profile window.
        :param ~silx.gui.plot.PlotWidget plot: The plot containing the raw data
:param ~core.ProfileRoiMixIn roi: A managed ROI
:rtype: ~ProfileWindow
"""
return self._profileWindowClass(plot)
def initProfileWindow(self, profileWindow, roi):
"""This function is called just after the profile window creation in
order to initialize the window location.
:param ~ProfileWindow profileWindow:
The profile window to initialize.
"""
# Enforce the use of one of the widgets
# To have the correct window size
profileWindow.prepareWidget(roi)
profileWindow.adjustSize()
# Trick to avoid blinking while retrieving the right window size
# Display the window, hide it and wait for some event loops
profileWindow.show()
profileWindow.hide()
eventLoop = qt.QEventLoop(self)
for _ in range(10):
if not eventLoop.processEvents():
break
profileWindow.show()
if len(self._previousWindowGeometry) > 0:
geometry = self._previousWindowGeometry.pop()
profileWindow.setGeometry(geometry)
return
window = self.getPlotWidget().window()
winGeom = window.frameGeometry()
if qt.BINDING in ("PySide2", "PyQt5"):
qapp = qt.QApplication.instance()
desktop = qapp.desktop()
screenGeom = desktop.availableGeometry(window)
else: # Qt6 (and also Qt>=5.14)
screenGeom = window.screen().availableGeometry()
spaceOnLeftSide = winGeom.left()
spaceOnRightSide = screenGeom.width() - winGeom.right()
profileGeom = profileWindow.frameGeometry()
profileWidth = profileGeom.width()
# Align vertically to the center of the window
top = winGeom.top() + (winGeom.height() - profileGeom.height()) // 2
margin = 5
if profileWidth < spaceOnRightSide:
# Place profile on the right
left = winGeom.right() + margin
elif profileWidth < spaceOnLeftSide:
# Place profile on the left
left = max(0, winGeom.left() - profileWidth - margin)
else:
# Move it as much as possible where there is more space
if spaceOnLeftSide > spaceOnRightSide:
left = 0
else:
left = screenGeom.width() - profileGeom.width()
profileWindow.move(left, top)
def clearProfileWindow(self, profileWindow):
"""Called when a profile window is not anymore needed.
By default the window will be closed. But it can be
inherited to change this behavior.
"""
profileWindow.deleteLater()
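    # Illustrative sketch (added for clarity, not from the original file; the
    # enclosing manager class name is an assumption): as the docstring above
    # suggests, a subclass can override clearProfileWindow() to keep profile
    # windows alive instead of deleting them, e.g.
    #
    #   class KeepAliveProfileManager(ProfileManager):
    #       def clearProfileWindow(self, profileWindow):
    #           profileWindow.hide()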
| 35.609259 | 90 | 0.621847 |
4a22bc17148d681878ca132d169b721d5a9e4257 | 1,472 | py | Python | analysis_liveTrade/data_definition.py | rindhane/stock_options | 8dcf324c9203102cd53549945740d213ce3bbfcf | [
"MIT"
] | null | null | null | analysis_liveTrade/data_definition.py | rindhane/stock_options | 8dcf324c9203102cd53549945740d213ce3bbfcf | [
"MIT"
] | null | null | null | analysis_liveTrade/data_definition.py | rindhane/stock_options | 8dcf324c9203102cd53549945740d213ce3bbfcf | [
"MIT"
] | null | null | null | from utilities.general.class_builders import self_setup_class
from utilities.data_analysis import (
Data,
Source_Json)
import pandas
import os
#---------constants--------------
DATA1=os.path.dirname(__file__)+'/test.json'
#-----------------------
sources= {
'test':Source_Json(name='test',path=DATA1)
}
#-------------------
column_mapper= {
0:'timestamp',
1: 'open',
2: 'high',
3: 'low',
4: 'close',
5: 'volume',
6: 'not-applicable'
}
def processor(col, schema):
    details = schema[col.name]
return col.apply(details[0], **(details[1] if details[1:] else {}))
def process_nothing(col):
return col
process_dict= {
'timestamp': [pandas.Timestamp],
'open': [process_nothing], #already float / integers and no nan values
'high': [process_nothing], #already float / integers and no nan values
'low': [process_nothing], #already float / integers and no nan values
'close': [process_nothing], #already float / integers and no nan values
'volume': [process_nothing], #already float / integers and no nan values
'not-applicable': [process_nothing], #already float / integers and no nan values
}
def clean_preprocessing(df,process_dict=process_dict,**kwargs):
df = df.rename(columns=column_mapper)
    df = df.apply(processor, **{'schema': process_dict})
return df
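if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): run the cleaning
    # step on a tiny hand-made frame shaped like the raw candle feed, i.e. with
    # integer column labels 0..6 matching column_mapper. All values are made up.
    _raw = pandas.DataFrame([[1625097600000, 1.0, 2.0, 0.5, 1.5, 100, 0],
                             [1625097660000, 1.5, 2.5, 1.0, 2.0, 120, 0]])
    print(clean_preprocessing(_raw).dtypes)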
| 29.44 | 84 | 0.595788 |
4a22bc2bcb47b128e97392693aaabf99de70023c | 15,065 | py | Python | superset/connectors/druid/views.py | Visortech-Solutions/incubator-superset | 4b33597e521e07d1ec74cdbda761e103814f60a2 | [
"Apache-2.0"
] | 1 | 2020-08-31T17:22:25.000Z | 2020-08-31T17:22:25.000Z | superset/connectors/druid/views.py | Visortech-Solutions/incubator-superset | 4b33597e521e07d1ec74cdbda761e103814f60a2 | [
"Apache-2.0"
] | 1 | 2020-08-02T04:42:57.000Z | 2020-08-02T04:42:57.000Z | superset/connectors/druid/views.py | Visortech-Solutions/incubator-superset | 4b33597e521e07d1ec74cdbda761e103814f60a2 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-ancestors
import json
import logging
from datetime import datetime
from flask import flash, Markup, redirect
from flask_appbuilder import CompactCRUDMixin, expose
from flask_appbuilder.fieldwidgets import Select2Widget
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import lazy_gettext as _
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from superset import db, security_manager
from superset.connectors.base.views import DatasourceModelView
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.druid import models
from superset.constants import RouteMethod
from superset.typing import FlaskResponse
from superset.utils import core as utils
from superset.views.base import (
BaseSupersetView,
DatasourceFilter,
DeleteMixin,
get_datasource_exist_error_msg,
ListWidgetWithCheckboxes,
SupersetModelView,
validate_json,
YamlExportMixin,
)
logger = logging.getLogger(__name__)
class DruidColumnInlineView(CompactCRUDMixin, SupersetModelView):
datamodel = SQLAInterface(models.DruidColumn)
include_route_methods = RouteMethod.RELATED_VIEW_SET
list_title = _("Columns")
show_title = _("Show Druid Column")
add_title = _("Add Druid Column")
edit_title = _("Edit Druid Column")
list_widget = ListWidgetWithCheckboxes
edit_columns = [
"column_name",
"verbose_name",
"description",
"dimension_spec_json",
"datasource",
"groupby",
"filterable",
]
add_columns = edit_columns
list_columns = ["column_name", "verbose_name", "type", "groupby", "filterable"]
can_delete = False
page_size = 500
label_columns = {
"column_name": _("Column"),
"type": _("Type"),
"datasource": _("Datasource"),
"groupby": _("Groupable"),
"filterable": _("Filterable"),
}
description_columns = {
"filterable": _(
"Whether this column is exposed in the `Filters` section "
"of the explore view."
),
"dimension_spec_json": utils.markdown(
"this field can be used to specify "
"a `dimensionSpec` as documented [here]"
"(http://druid.io/docs/latest/querying/dimensionspecs.html). "
"Make sure to input valid JSON and that the "
"`outputName` matches the `column_name` defined "
"above.",
True,
),
}
add_form_extra_fields = {
"datasource": QuerySelectField(
"Datasource",
query_factory=lambda: db.session().query(models.DruidDatasource),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
def pre_update(self, item: "DruidColumnInlineView") -> None:
# If a dimension spec JSON is given, ensure that it is
# valid JSON and that `outputName` is specified
if item.dimension_spec_json:
try:
dimension_spec = json.loads(item.dimension_spec_json)
except ValueError as ex:
raise ValueError("Invalid Dimension Spec JSON: " + str(ex))
if not isinstance(dimension_spec, dict):
raise ValueError("Dimension Spec must be a JSON object")
if "outputName" not in dimension_spec:
raise ValueError("Dimension Spec does not contain `outputName`")
if "dimension" not in dimension_spec:
raise ValueError("Dimension Spec is missing `dimension`")
# `outputName` should be the same as the `column_name`
if dimension_spec["outputName"] != item.column_name:
raise ValueError(
"`outputName` [{}] unequal to `column_name` [{}]".format(
dimension_spec["outputName"], item.column_name
)
)
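    # Illustrative example (added for clarity, not from the original file): a
    # minimal dimension spec that passes the checks in pre_update() above for a
    # column named "country_iso" -- valid JSON containing both "dimension" and
    # "outputName", with "outputName" equal to the column_name:
    #
    #   {"type": "default", "dimension": "country", "outputName": "country_iso"}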
def post_update(self, item: "DruidColumnInlineView") -> None:
item.refresh_metrics()
def post_add(self, item: "DruidColumnInlineView") -> None:
self.post_update(item)
class DruidMetricInlineView(CompactCRUDMixin, SupersetModelView):
datamodel = SQLAInterface(models.DruidMetric)
include_route_methods = RouteMethod.RELATED_VIEW_SET
list_title = _("Metrics")
show_title = _("Show Druid Metric")
add_title = _("Add Druid Metric")
edit_title = _("Edit Druid Metric")
list_columns = ["metric_name", "verbose_name", "metric_type"]
edit_columns = [
"metric_name",
"description",
"verbose_name",
"metric_type",
"json",
"datasource",
"d3format",
"warning_text",
]
add_columns = edit_columns
page_size = 500
validators_columns = {"json": [validate_json]}
description_columns = {
"metric_type": utils.markdown(
"use `postagg` as the metric type if you are defining a "
"[Druid Post Aggregation]"
"(http://druid.io/docs/latest/querying/post-aggregations.html)",
True,
)
}
label_columns = {
"metric_name": _("Metric"),
"description": _("Description"),
"verbose_name": _("Verbose Name"),
"metric_type": _("Type"),
"json": _("JSON"),
"datasource": _("Druid Datasource"),
"warning_text": _("Warning Message"),
}
add_form_extra_fields = {
"datasource": QuerySelectField(
"Datasource",
query_factory=lambda: db.session().query(models.DruidDatasource),
allow_blank=True,
widget=Select2Widget(extra_classes="readonly"),
)
}
edit_form_extra_fields = add_form_extra_fields
class DruidClusterModelView(SupersetModelView, DeleteMixin, YamlExportMixin):
datamodel = SQLAInterface(models.DruidCluster)
include_route_methods = RouteMethod.CRUD_SET
list_title = _("Druid Clusters")
show_title = _("Show Druid Cluster")
add_title = _("Add Druid Cluster")
edit_title = _("Edit Druid Cluster")
add_columns = [
"verbose_name",
"broker_host",
"broker_port",
"broker_user",
"broker_pass",
"broker_endpoint",
"cache_timeout",
"cluster_name",
]
edit_columns = add_columns
list_columns = ["cluster_name", "metadata_last_refreshed"]
search_columns = ("cluster_name",)
label_columns = {
"cluster_name": _("Cluster Name"),
"broker_host": _("Broker Host"),
"broker_port": _("Broker Port"),
"broker_user": _("Broker Username"),
"broker_pass": _("Broker Password"),
"broker_endpoint": _("Broker Endpoint"),
"verbose_name": _("Verbose Name"),
"cache_timeout": _("Cache Timeout"),
"metadata_last_refreshed": _("Metadata Last Refreshed"),
}
description_columns = {
"cache_timeout": _(
"Duration (in seconds) of the caching timeout for this cluster. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the global timeout if undefined."
),
"broker_user": _(
"Druid supports basic authentication. See "
"[auth](http://druid.io/docs/latest/design/auth.html) and "
"druid-basic-security extension"
),
"broker_pass": _(
"Druid supports basic authentication. See "
"[auth](http://druid.io/docs/latest/design/auth.html) and "
"druid-basic-security extension"
),
}
yaml_dict_key = "databases"
def pre_add(self, item: "DruidClusterModelView") -> None:
security_manager.add_permission_view_menu("database_access", item.perm)
def pre_update(self, item: "DruidClusterModelView") -> None:
self.pre_add(item)
def _delete(self, pk: int) -> None:
DeleteMixin._delete(self, pk)
class DruidDatasourceModelView(DatasourceModelView, DeleteMixin, YamlExportMixin):
datamodel = SQLAInterface(models.DruidDatasource)
include_route_methods = RouteMethod.CRUD_SET
list_title = _("Druid Datasources")
show_title = _("Show Druid Datasource")
add_title = _("Add Druid Datasource")
edit_title = _("Edit Druid Datasource")
list_columns = ["datasource_link", "cluster", "changed_by_", "modified"]
order_columns = ["datasource_link", "modified"]
related_views = [DruidColumnInlineView, DruidMetricInlineView]
edit_columns = [
"datasource_name",
"cluster",
"description",
"owners",
"is_hidden",
"filter_select_enabled",
"fetch_values_from",
"default_endpoint",
"offset",
"cache_timeout",
]
search_columns = ("datasource_name", "cluster", "description", "owners")
add_columns = edit_columns
show_columns = add_columns + ["perm", "slices"]
page_size = 500
base_order = ("datasource_name", "asc")
description_columns = {
"slices": _(
"The list of charts associated with this table. By "
"altering this datasource, you may change how these associated "
"charts behave. "
"Also note that charts need to point to a datasource, so "
"this form will fail at saving if removing charts from a "
"datasource. If you want to change the datasource for a chart, "
"overwrite the chart from the 'explore view'"
),
"offset": _("Timezone offset (in hours) for this datasource"),
"description": Markup(
'Supports <a href="'
'https://daringfireball.net/projects/markdown/">markdown</a>'
),
"fetch_values_from": _(
"Time expression to use as a predicate when retrieving "
"distinct values to populate the filter component. "
"Only applies when `Enable Filter Select` is on. If "
"you enter `7 days ago`, the distinct list of values in "
"the filter will be populated based on the distinct value over "
"the past week"
),
"filter_select_enabled": _(
"Whether to populate the filter's dropdown in the explore "
"view's filter section with a list of distinct values fetched "
"from the backend on the fly"
),
"default_endpoint": _(
"Redirects to this endpoint when clicking on the datasource "
"from the datasource list"
),
"cache_timeout": _(
"Duration (in seconds) of the caching timeout for this datasource. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the cluster timeout if undefined."
),
}
base_filters = [["id", DatasourceFilter, lambda: []]]
label_columns = {
"slices": _("Associated Charts"),
"datasource_link": _("Data Source"),
"cluster": _("Cluster"),
"description": _("Description"),
"owners": _("Owners"),
"is_hidden": _("Is Hidden"),
"filter_select_enabled": _("Enable Filter Select"),
"default_endpoint": _("Default Endpoint"),
"offset": _("Time Offset"),
"cache_timeout": _("Cache Timeout"),
"datasource_name": _("Datasource Name"),
"fetch_values_from": _("Fetch Values From"),
"changed_by_": _("Changed By"),
"modified": _("Modified"),
}
def pre_add(self, item: "DruidDatasourceModelView") -> None:
with db.session.no_autoflush:
query = db.session.query(models.DruidDatasource).filter(
models.DruidDatasource.datasource_name == item.datasource_name,
models.DruidDatasource.cluster_id == item.cluster_id,
)
if db.session.query(query.exists()).scalar():
raise Exception(get_datasource_exist_error_msg(item.full_name))
def post_add(self, item: "DruidDatasourceModelView") -> None:
item.refresh_metrics()
security_manager.add_permission_view_menu("datasource_access", item.get_perm())
if item.schema:
security_manager.add_permission_view_menu("schema_access", item.schema_perm)
def post_update(self, item: "DruidDatasourceModelView") -> None:
self.post_add(item)
def _delete(self, pk: int) -> None:
DeleteMixin._delete(self, pk)
class Druid(BaseSupersetView):
"""The base views for Superset!"""
@has_access
@expose("/refresh_datasources/")
def refresh_datasources( # pylint: disable=no-self-use
self, refresh_all: bool = True
) -> FlaskResponse:
"""endpoint that refreshes druid datasources metadata"""
DruidCluster = ConnectorRegistry.sources[ # pylint: disable=invalid-name
"druid"
].cluster_class
for cluster in db.session.query(DruidCluster).all():
cluster_name = cluster.cluster_name
valid_cluster = True
try:
cluster.refresh_datasources(refresh_all=refresh_all)
except Exception as ex: # pylint: disable=broad-except
valid_cluster = False
flash(
"Error while processing cluster '{}'\n{}".format(
cluster_name, utils.error_msg_from_exception(ex)
),
"danger",
)
logger.exception(ex)
if valid_cluster:
cluster.metadata_last_refreshed = datetime.now()
flash(
_("Refreshed metadata from cluster [{}]").format(
cluster.cluster_name
),
"info",
)
db.session.commit()
return redirect("/druiddatasourcemodelview/list/")
@has_access
@expose("/scan_new_datasources/")
def scan_new_datasources(self) -> FlaskResponse:
"""
Calling this endpoint will cause a scan for new
datasources only and add them.
"""
return self.refresh_datasources(refresh_all=False)
| 37.289604 | 88 | 0.627946 |
4a22bc4ed8129dac80ccf03075cfd2cb8144d144 | 4,734 | py | Python | homeassistant/components/hunterdouglas_powerview/__init__.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | homeassistant/components/hunterdouglas_powerview/__init__.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | homeassistant/components/hunterdouglas_powerview/__init__.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | """The Hunter Douglas PowerView integration."""
import logging
from aiopvapi.helpers.aiorequest import AioRequest
from aiopvapi.helpers.api_base import ApiEntryPoint
from aiopvapi.helpers.tools import base64_to_unicode
from aiopvapi.rooms import Rooms
from aiopvapi.scenes import Scenes
from aiopvapi.shades import Shades
from aiopvapi.userdata import UserData
import async_timeout
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import (
API_PATH_FWVERSION,
DEFAULT_LEGACY_MAINPROCESSOR,
DOMAIN,
FIRMWARE,
FIRMWARE_MAINPROCESSOR,
FIRMWARE_NAME,
HUB_EXCEPTIONS,
HUB_NAME,
MAC_ADDRESS_IN_USERDATA,
ROOM_DATA,
SCENE_DATA,
SERIAL_NUMBER_IN_USERDATA,
SHADE_DATA,
USER_DATA,
)
from .coordinator import PowerviewShadeUpdateCoordinator
from .model import PowerviewDeviceInfo, PowerviewEntryData
from .shade_data import PowerviewShadeData
from .util import async_map_data_by_id
PARALLEL_UPDATES = 1
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)
PLATFORMS = [Platform.BUTTON, Platform.COVER, Platform.SCENE, Platform.SENSOR]
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Hunter Douglas PowerView from a config entry."""
config = entry.data
hub_address = config[CONF_HOST]
websession = async_get_clientsession(hass)
pv_request = AioRequest(hub_address, loop=hass.loop, websession=websession)
try:
async with async_timeout.timeout(10):
device_info = await async_get_device_info(pv_request, hub_address)
async with async_timeout.timeout(10):
rooms = Rooms(pv_request)
room_data = async_map_data_by_id((await rooms.get_resources())[ROOM_DATA])
async with async_timeout.timeout(10):
scenes = Scenes(pv_request)
scene_data = async_map_data_by_id(
(await scenes.get_resources())[SCENE_DATA]
)
async with async_timeout.timeout(10):
shades = Shades(pv_request)
shade_entries = await shades.get_resources()
shade_data = async_map_data_by_id(shade_entries[SHADE_DATA])
except HUB_EXCEPTIONS as err:
raise ConfigEntryNotReady(
f"Connection error to PowerView hub: {hub_address}: {err}"
) from err
if not device_info:
raise ConfigEntryNotReady(f"Unable to initialize PowerView hub: {hub_address}")
coordinator = PowerviewShadeUpdateCoordinator(hass, shades, hub_address)
coordinator.async_set_updated_data(PowerviewShadeData())
# populate raw shade data into the coordinator for diagnostics
coordinator.data.store_group_data(shade_entries[SHADE_DATA])
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = PowerviewEntryData(
api=pv_request,
room_data=room_data,
scene_data=scene_data,
shade_data=shade_data,
coordinator=coordinator,
device_info=device_info,
)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_get_device_info(
pv_request: AioRequest, hub_address: str
) -> PowerviewDeviceInfo:
"""Determine device info."""
userdata = UserData(pv_request)
resources = await userdata.get_resources()
userdata_data = resources[USER_DATA]
if FIRMWARE in userdata_data:
main_processor_info = userdata_data[FIRMWARE][FIRMWARE_MAINPROCESSOR]
elif userdata_data:
# Legacy devices
fwversion = ApiEntryPoint(pv_request, API_PATH_FWVERSION)
resources = await fwversion.get_resources()
if FIRMWARE in resources:
main_processor_info = resources[FIRMWARE][FIRMWARE_MAINPROCESSOR]
else:
main_processor_info = DEFAULT_LEGACY_MAINPROCESSOR
return PowerviewDeviceInfo(
name=base64_to_unicode(userdata_data[HUB_NAME]),
mac_address=userdata_data[MAC_ADDRESS_IN_USERDATA],
serial_number=userdata_data[SERIAL_NUMBER_IN_USERDATA],
firmware=main_processor_info,
model=main_processor_info[FIRMWARE_NAME],
hub_address=hub_address,
)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| 33.814286 | 87 | 0.741022 |
4a22bcb559cc3125095f4d73fd2ca3b0b60ba261 | 2,991 | py | Python | mbuild/atb_client.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | mbuild/atb_client.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | mbuild/atb_client.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | import httplib2
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
try:
    from urllib import urlencode  # Python 2
except ImportError:
    from urllib.parse import urlencode  # Python 3
import warnings
class SearchResultHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.molids = {}
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr, v in attrs:
if attr == 'href':
if v.startswith('./molecule.py?molid='):
molid = v[v.find('=')+1:]
self.molids[int(molid)] = "http://compbio.biosci.uq.edu.au/atb"+v[1:]
class AtbClient(object):
def __init__(self):
self.h = httplib2.Http(".cache")
def search(self, query):
url = "http://compbio.biosci.uq.edu.au/atb/index.py?molsPerPage=1000&search={}".format(
query)
resp, content = self.h.request(url, "GET")
if resp['status'] != '200':
warnings.warn('HTTP response status is {} for URL "{}"'.format(resp['status'], url))
return None
parser = SearchResultHTMLParser()
        parser.feed(content.decode("utf-8") if isinstance(content, bytes) else content)
return parser.molids
def generate_topology(self, molid, ff_version="53A6",):
query_pairs = {"molid": str(molid), "ffVersion": ff_version,
"outputType": "top", "atbVersion": "v2Top",
"format": "GROMACS"}
        query_string = urlencode(query_pairs)
url = "http://compbio.biosci.uq.edu.au/atb/molecule.py?{}".format(query_string)
# print url
resp, content = self.h.request(url, "GET")
if resp['status'] != '200':
warnings.warn('HTTP response status is {} for URL "{}"'.format(resp['status'], url))
return None
return content
def retrieve_itp(self, molid, ff_version="53A6", all_atom=True):
self.generate_topology(molid, ff_version)
query_pairs = {"molid": str(molid), "ffVersion": ff_version,
"outputType": "top", "atbVersion": "v2Top"}
if all_atom:
query_pairs["file"] = "rtp_allatom"
else:
query_pairs["file"] = "rtp_uniatom"
        query_string = urlencode(query_pairs)
url = "http://compbio.biosci.uq.edu.au/atb/download.py?{}".format(query_string)
# print url
resp, content = self.h.request(url, "GET")
if resp['status'] != '200':
warnings.warn('HTTP response status is {} for URL "{}"'.format(resp['status'], url))
return None
if not resp['content-type'].startswith('text/plain'):
warnings.warn('Expecting text/plain response, got "{}" for URL "{}"'.format(
resp['content-type'], url))
return None
return content
if __name__ == "__main__":
atb = AtbClient()
results = atb.search("C6H12O6")
    for molecule_id, uri in results.items():
print(atb.retrieve_itp(molecule_id))
| 30.835052 | 96 | 0.573387 |
4a22bcb814bb3ca074aea1a62352b3e433bc4faf | 2,760 | py | Python | hyfed-server/pca_server/serializer/pca_serializers.py | AnneHartebrodt/hyfed-pca | 57c009d17d00524f216d57f4fd3fb8732c3fccce | [
"Apache-2.0"
] | 1 | 2021-06-15T17:29:31.000Z | 2021-06-15T17:29:31.000Z | hyfed-server/pca_server/serializer/pca_serializers.py | AnneHartebrodt/hyfed-pca | 57c009d17d00524f216d57f4fd3fb8732c3fccce | [
"Apache-2.0"
] | null | null | null | hyfed-server/pca_server/serializer/pca_serializers.py | AnneHartebrodt/hyfed-pca | 57c009d17d00524f216d57f4fd3fb8732c3fccce | [
"Apache-2.0"
] | null | null | null | """
Pca project serializer to serialize project specific fields
Copyright 2021 'My Name'. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from hyfed_server.serializer.hyfed_serializers import HyFedProjectSerializer
class PcaProjectSerializer(HyFedProjectSerializer):
""" Serializes the Pca project model to serve a WebApp/client request """
max_iterations = serializers.SerializerMethodField()
max_dimensions = serializers.SerializerMethodField()
center = serializers.SerializerMethodField()
scale_variance = serializers.SerializerMethodField()
log2 = serializers.SerializerMethodField()
federated_qr = serializers.SerializerMethodField()
send_final_result = serializers.SerializerMethodField()
current_iteration = serializers.SerializerMethodField()
epsilon = serializers.SerializerMethodField()
speedup = serializers.SerializerMethodField()
use_smpc = serializers.SerializerMethodField()
def get_max_iterations(self, instance):
return instance.max_iterations
def get_max_dimensions(self, instance):
return instance.max_dimensions
def get_center(self, instance):
return instance.center
def get_scale_variance(self, instance):
return instance.scale_variance
def get_log2(self, instance):
return instance.log2
def get_federated_qr(self, instance):
return instance.federated_qr
def get_send_final_result(self, instance):
return instance.send_final_result
def get_current_iteration(self, instance):
return instance.current_iteration
def get_epsilon(self, instance):
return instance.epsilon
def get_speedup(self, instance):
return instance.speedup
def get_use_smpc(self, instance):
return instance.use_smpc
class Meta(HyFedProjectSerializer.Meta):
fields = HyFedProjectSerializer.Meta.fields + ('max_iterations', 'max_dimensions', 'center',
'scale_variance', 'log2', 'federated_qr', 'send_final_result',
'current_iteration', 'epsilon','speedup', 'use_smpc',)
| 36.8 | 117 | 0.717391 |
4a22bd218540f794edc37a37267f98b3597afa6a | 24,145 | py | Python | test/functional/rpc_rawtransaction.py | CommanderXanon/Xcoin | 58c8daa1346e1b5f563d457c475c7f2804596551 | [
"MIT"
] | null | null | null | test/functional/rpc_rawtransaction.py | CommanderXanon/Xcoin | 58c8daa1346e1b5f563d457c475c7f2804596551 | [
"MIT"
] | 1 | 2020-06-20T00:39:21.000Z | 2020-06-21T02:32:33.000Z | test/functional/rpc_rawtransaction.py | CommanderXanon/Xcoin | 58c8daa1346e1b5f563d457c475c7f2804596551 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import XcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
    >>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(XcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={}) # Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Xcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Two data outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([('data', '99'), ('data', '99')])))))
assert_equal(len(tx.vout), 2)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{'data': '99'}, {'data': '99'}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), ('data', '99'), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {'data': '99'}, {'data': '99'}]),
)
for type in ["bech32", "p2sh-segwit", "legacy"]:
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
# Test `signrawtransactionwithwallet` invalid `prevtxs`
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type != "legacy":
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
#########################################
# sendrawtransaction with missing input #
#########################################
self.log.info('sendrawtransaction with missing input')
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 XCN to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], -0x80000000)
# Test the maximum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
if __name__ == '__main__':
RawTransactionsTest().main()
| 54.626697 | 233 | 0.64738 |
4a22bd5fad9da20694985c416fe9a602feea21cf | 15,905 | py | Python | quaternion_layers/norm.py | heheqianqian/DeepQuaternionNetworks | 199d261f080896c9408e771f980b8a98e159f847 | [
"MIT"
] | null | null | null | quaternion_layers/norm.py | heheqianqian/DeepQuaternionNetworks | 199d261f080896c9408e771f980b8a98e159f847 | [
"MIT"
] | 1 | 2020-01-03T17:03:45.000Z | 2020-01-04T00:02:46.000Z | quaternion_layers/norm.py | heheqianqian/DeepQuaternionNetworks | 199d261f080896c9408e771f980b8a98e159f847 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Authors: Chase Gaudet
# code based on work by Chiheb Trabelsi
# on Deep Complex Networks git source
#
# Implementation of Layer Normalization and Quaternion Layer Normalization
import numpy as np
from keras.layers import Layer, InputSpec
from keras import initializers, regularizers, constraints
import keras.backend as K
from .bn import QuaternionBN as quaternion_normalization
from .bn import sqrt_init
def layernorm(x, axis, epsilon, gamma, beta):
# assert self.built, 'Layer must be built before being called'
input_shape = K.shape(x)
reduction_axes = list(range(K.ndim(x)))
del reduction_axes[axis]
del reduction_axes[0]
broadcast_shape = [1] * K.ndim(x)
broadcast_shape[axis] = input_shape[axis]
broadcast_shape[0] = K.shape(x)[0]
# Perform normalization: centering and reduction
mean = K.mean(x, axis=reduction_axes)
broadcast_mean = K.reshape(mean, broadcast_shape)
x_centred = x - broadcast_mean
variance = K.mean(x_centred ** 2, axis=reduction_axes) + epsilon
broadcast_variance = K.reshape(variance, broadcast_shape)
x_normed = x_centred / K.sqrt(broadcast_variance)
# Perform scaling and shifting
broadcast_shape_params = [1] * K.ndim(x)
broadcast_shape_params[axis] = K.shape(x)[axis]
broadcast_gamma = K.reshape(gamma, broadcast_shape_params)
broadcast_beta = K.reshape(beta, broadcast_shape_params)
x_LN = broadcast_gamma * x_normed + broadcast_beta
return x_LN
class LayerNormalization(Layer):
def __init__(self,
epsilon=1e-4,
axis=-1,
beta_init='zeros',
gamma_init='ones',
gamma_regularizer=None,
beta_regularizer=None,
**kwargs):
self.supports_masking = True
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.epsilon = epsilon
self.axis = axis
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
super(LayerNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = InputSpec(ndim=len(input_shape),
axes={self.axis: input_shape[self.axis]})
shape = (input_shape[self.axis],)
self.gamma = self.add_weight(shape,
initializer=self.gamma_init,
regularizer=self.gamma_regularizer,
name='{}_gamma'.format(self.name))
self.beta = self.add_weight(shape,
initializer=self.beta_init,
regularizer=self.beta_regularizer,
name='{}_beta'.format(self.name))
self.built = True
def call(self, x, mask=None):
assert self.built, 'Layer must be built before being called'
return layernorm(x, self.axis, self.epsilon, self.gamma, self.beta)
def get_config(self):
config = {'epsilon': self.epsilon,
'axis': self.axis,
'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None,
'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None
}
base_config = super(LayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class QuaternionLayerNorm(Layer):
def __init__(self,
epsilon=1e-4,
axis=-1,
center=True,
scale=True,
beta_initializer='zeros',
gamma_diag_initializer=sqrt_init,
gamma_off_initializer='zeros',
beta_regularizer=None,
gamma_diag_regularizer=None,
gamma_off_regularizer=None,
beta_constraint=None,
gamma_diag_constraint=None,
gamma_off_constraint=None,
**kwargs):
self.supports_masking = True
self.epsilon = epsilon
self.axis = axis
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_diag_initializer = initializers.get(gamma_diag_initializer)
self.gamma_off_initializer = initializers.get(gamma_off_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_diag_regularizer = regularizers.get(gamma_diag_regularizer)
self.gamma_off_regularizer = regularizers.get(gamma_off_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_diag_constraint = constraints.get(gamma_diag_constraint)
self.gamma_off_constraint = constraints.get(gamma_off_constraint)
super(QuaternionLayerNorm, self).__init__(**kwargs)
def build(self, input_shape):
ndim = len(input_shape)
dim = input_shape[self.axis]
if dim is None:
raise ValueError('Axis ' + str(self.axis) + ' of '
'input tensor should have a defined dimension '
'but the layer received an input with shape ' +
str(input_shape) + '.')
self.input_spec = InputSpec(ndim=len(input_shape),
axes={self.axis: dim})
gamma_shape = (input_shape[self.axis] // 4,)
if self.scale:
self.gamma_rr = self.add_weight(
shape=gamma_shape,
name='gamma_rr',
initializer=self.gamma_diag_initializer,
regularizer=self.gamma_diag_regularizer,
constraint=self.gamma_diag_constraint
)
self.gamma_ii = self.add_weight(
shape=gamma_shape,
name='gamma_ii',
initializer=self.gamma_diag_initializer,
regularizer=self.gamma_diag_regularizer,
constraint=self.gamma_diag_constraint
)
self.gamma_jj = self.add_weight(
shape=gamma_shape,
name='gamma_jj',
initializer=self.gamma_diag_initializer,
regularizer=self.gamma_diag_regularizer,
constraint=self.gamma_diag_constraint
)
self.gamma_kk = self.add_weight(
shape=gamma_shape,
name='gamma_kk',
initializer=self.gamma_diag_initializer,
regularizer=self.gamma_diag_regularizer,
constraint=self.gamma_diag_constraint
)
self.gamma_ri = self.add_weight(
shape=gamma_shape,
name='gamma_ri',
initializer=self.gamma_off_initializer,
regularizer=self.gamma_off_regularizer,
constraint=self.gamma_off_constraint
)
self.gamma_rj = self.add_weight(
shape=gamma_shape,
name='gamma_rj',
initializer=self.gamma_off_initializer,
regularizer=self.gamma_off_regularizer,
constraint=self.gamma_off_constraint
)
self.gamma_rk = self.add_weight(
shape=gamma_shape,
name='gamma_rk',
initializer=self.gamma_off_initializer,
regularizer=self.gamma_off_regularizer,
constraint=self.gamma_off_constraint
)
self.gamma_ij = self.add_weight(
shape=gamma_shape,
name='gamma_ij',
initializer=self.gamma_off_initializer,
regularizer=self.gamma_off_regularizer,
constraint=self.gamma_off_constraint
)
self.gamma_ik = self.add_weight(
shape=gamma_shape,
name='gamma_ik',
initializer=self.gamma_off_initializer,
regularizer=self.gamma_off_regularizer,
constraint=self.gamma_off_constraint
)
self.gamma_jk = self.add_weight(
shape=gamma_shape,
name='gamma_jk',
initializer=self.gamma_off_initializer,
regularizer=self.gamma_off_regularizer,
constraint=self.gamma_off_constraint
)
else:
self.gamma_rr = None
self.gamma_ii = None
self.gamma_jj = None
self.gamma_kk = None
self.gamma_ri = None
self.gamma_rj = None
self.gamma_rk = None
self.gamma_ij = None
self.gamma_ik = None
self.gamma_jk = None
if self.center:
self.beta = self.add_weight(shape=(input_shape[self.axis],),
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
self.built = True
def call(self, inputs):
input_shape = K.shape(inputs)
ndim = K.ndim(inputs)
reduction_axes = list(range(ndim))
del reduction_axes[self.axis]
del reduction_axes[0]
input_dim = input_shape[self.axis] // 4
mu = K.mean(inputs, axis=reduction_axes)
broadcast_mu_shape = [1] * ndim
broadcast_mu_shape[self.axis] = input_shape[self.axis]
broadcast_mu_shape[0] = K.shape(inputs)[0]
broadcast_mu = K.reshape(mu, broadcast_mu_shape)
if self.center:
input_centred = inputs - broadcast_mu
else:
input_centred = inputs
centred_squared = input_centred ** 2
if (self.axis == 1 and ndim != 3) or ndim == 2:
centred_squared_r = centred_squared[:, :input_dim]
centred_squared_i = centred_squared[:, input_dim:input_dim*2]
centred_squared_j = centred_squared[:, input_dim*2:input_dim*3]
centred_squared_k = centred_squared[:, input_dim*3:]
centred_r = input_centred[:, :input_dim]
centred_i = input_centred[:, input_dim:input_dim*2]
centred_j = input_centred[:, input_dim*2:input_dim*3]
centred_k = input_centred[:, input_dim*3:]
elif ndim == 3:
centred_squared_r = centred_squared[:, :, :input_dim]
centred_squared_i = centred_squared[:, :, input_dim:input_dim*2]
centred_squared_j = centred_squared[:, :, input_dim*2:input_dim*3]
centred_squared_k = centred_squared[:, :, input_dim*3:]
centred_r = input_centred[:, :, :input_dim]
centred_i = input_centred[:, :, input_dim:input_dim*2]
centred_j = input_centred[:, :, input_dim*2:input_dim*3]
centred_k = input_centred[:, :, input_dim*3:]
elif self.axis == -1 and ndim == 4:
centred_squared_r = centred_squared[:, :, :, :input_dim]
centred_squared_i = centred_squared[:, :, :, input_dim:input_dim*2]
centred_squared_j = centred_squared[:, :, :, input_dim*2:input_dim*3]
centred_squared_k = centred_squared[:, :, :, input_dim*3:]
centred_r = input_centred[:, :, :, :input_dim]
centred_i = input_centred[:, :, :, input_dim:input_dim*2]
centred_j = input_centred[:, :, :, input_dim*2:input_dim*3]
centred_k = input_centred[:, :, :, input_dim*3:]
elif self.axis == -1 and ndim == 5:
centred_squared_r = centred_squared[:, :, :, :, :input_dim]
centred_squared_i = centred_squared[:, :, :, :, input_dim:input_dim*2]
centred_squared_j = centred_squared[:, :, :, :, input_dim*2:input_dim*3]
centred_squared_k = centred_squared[:, :, :, :, input_dim*3:]
centred_r = input_centred[:, :, :, :, :input_dim]
centred_i = input_centred[:, :, :, :, input_dim:input_dim*2]
centred_j = input_centred[:, :, :, :, input_dim*2:input_dim*3]
centred_k = input_centred[:, :, :, :, input_dim*3:]
else:
raise ValueError(
'Incorrect Layernorm combination of axis and dimensions. axis should be either 1 or -1. '
'axis: ' + str(self.axis) + '; ndim: ' + str(ndim) + '.'
)
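        # The four slices above are assumed to be the real (r) and the three imaginary
        # (i, j, k) quaternion components, concatenated along the feature axis in
        # contiguous blocks of size input_dim.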
if self.scale:
Vrr = K.mean(
centred_squared_r,
axis=reduction_axes
) + self.epsilon
Vii = K.mean(
centred_squared_i,
axis=reduction_axes
) + self.epsilon
Vjj = K.mean(
centred_squared_j,
axis=reduction_axes
) + self.epsilon
Vkk = K.mean(
centred_squared_k,
axis=reduction_axes
) + self.epsilon
Vri = K.mean(
centred_r * centred_i,
axis=reduction_axes,
)
Vrj = K.mean(
centred_r * centred_j,
axis=reduction_axes,
)
Vrk = K.mean(
centred_r * centred_k,
axis=reduction_axes,
)
Vij = K.mean(
centred_i * centred_j,
axis=reduction_axes,
)
Vik = K.mean(
centred_i * centred_k,
axis=reduction_axes,
)
Vjk = K.mean(
centred_j * centred_k,
axis=reduction_axes,
)
elif self.center:
Vrr = None
Vii = None
Vjj = None
Vkk = None
Vri = None
Vrj = None
Vrk = None
Vij = None
Vik = None
            Vjk = None
else:
            raise ValueError('Error. Both scale and center in layernorm are set to False.')
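        # The ten V** moments above form the independent entries of the symmetric 4x4
        # covariance of the centred (r, i, j, k) components,
        #     [[Vrr, Vri, Vrj, Vrk],
        #      [Vri, Vii, Vij, Vik],
        #      [Vrj, Vij, Vjj, Vjk],
        #      [Vrk, Vik, Vjk, Vkk]]
        # which quaternion_normalization presumably inverts (via a matrix square root)
        # to whiten the input before gamma and beta are applied.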
return quaternion_normalization(
input_centred,
Vrr, Vri, Vrj, Vrk, Vii,
Vij, Vik, Vjj, Vjk, Vkk,
self.beta,
self.gamma_rr, self.gamma_ri,
self.gamma_rj, self.gamma_rk,
self.gamma_ii, self.gamma_ij,
self.gamma_ik, self.gamma_jj,
self.gamma_jk, self.gamma_kk,
self.scale, self.center,
layernorm=True, axis=self.axis
)
def get_config(self):
config = {
'axis': self.axis,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_diag_initializer': initializers.serialize(self.gamma_diag_initializer),
'gamma_off_initializer': initializers.serialize(self.gamma_off_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_diag_regularizer': regularizers.serialize(self.gamma_diag_regularizer),
'gamma_off_regularizer': regularizers.serialize(self.gamma_off_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_diag_constraint': constraints.serialize(self.gamma_diag_constraint),
'gamma_off_constraint': constraints.serialize(self.gamma_off_constraint),
}
base_config = super(QuaternionLayerNorm, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
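# A minimal, self-contained numpy sketch (not part of the layer above) of the statistics
# QuaternionLayerNorm.call() computes for a 3D input with axis=-1, where the reduction
# axis resolved in call() is axis 1. The split into r/i/j/k blocks follows the slicing
# used above; epsilon and the off-diagonal terms other than Vri are omitted for brevity.
if __name__ == "__main__":
    import numpy as np
    batch, steps, input_dim = 2, 5, 3
    x = np.random.randn(batch, steps, 4 * input_dim)
    centred = x - x.mean(axis=1, keepdims=True)  # same role as broadcast_mu
    r, i, j, k = np.split(centred, 4, axis=-1)   # blocks of size input_dim
    Vrr = (r ** 2).mean(axis=1)                  # + self.epsilon in the layer
    Vri = (r * i).mean(axis=1)                   # one off-diagonal covariance term
    print(Vrr.shape, Vri.shape)                  # both (batch, input_dim)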
| 41.204663 | 111 | 0.569821 |
4a22bdaf657ee0043b9654a4d83d082c5b111bfe | 12,595 | py | Python | pyleecan/Generator/ClassGenerator/compare_method_generator.py | tobsen2code/pyleecan | 5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Generator/ClassGenerator/compare_method_generator.py | ecs-kev/pyleecan | 1faedde4b24acc6361fa1fdd4e980eaec4ca3a62 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Generator/ClassGenerator/compare_method_generator.py | ecs-kev/pyleecan | 1faedde4b24acc6361fa1fdd4e980eaec4ca3a62 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | from ...Generator import TAB, TAB2, TAB3, TAB4, TAB5
from ...Generator.read_fct import is_list_pyleecan_type, is_dict_pyleecan_type
def generate_compare(gen_dict, class_dict):
"""Generate the code for the compare method of the class
Parameters
----------
gen_dict : dict
Dict with key = class name and value = class dict (name, package, properties, methods...)
class_dict : dict
dictionary of the class to generate (keys are name, package, properties, methods...)
Returns
-------
compare_str : str
String containing the code for the compare method of the class
"""
class_name = class_dict["name"]
compare_str = "" # This string is for the generated code
# Code generation
compare_str += TAB + "def compare(self, other, name='self', ignore_list=None):\n"
compare_str += TAB2 + '"""Compare two objects and return list of differences"""\n\n'
# Check the type
compare_str += TAB2 + "if ignore_list is None:\n"
compare_str += TAB3 + "ignore_list = list()\n"
compare_str += TAB2 + "if type(other) != type(self):\n"
compare_str += TAB3 + "return ['type('+name+')']\n"
compare_str += TAB2 + "diff_list = list()\n"
    # Call mother compare
if class_dict["mother"] != "":
compare_str += (
"\n"
+ TAB2
+ "# Check the properties inherited from "
+ class_dict["mother"]
+ "\n"
)
compare_str += (
TAB2
+ "diff_list.extend(super("
+ class_name
+ ", self).compare(other,name=name))\n"
)
    # Check that all the properties (except parent) are equal
for prop in class_dict["properties"]:
if prop["type"] == "ndarray":
compare_str += (
TAB2
+ "if not array_equal(other."
+ prop["name"]
+ ", self."
+ prop["name"]
+ "):\n"
)
compare_str += TAB3 + "diff_list.append(name+'." + prop["name"] + "')\n"
elif prop["type"] in [None, ""]:
compare_str += (
TAB2
+ "if (other."
+ prop["name"]
+ " is None and self."
+ prop["name"]
+ " is not None) or (other."
+ prop["name"]
+ " is not None and self."
+ prop["name"]
+ " is None):\n"
)
compare_str += TAB3 + "diff_list.append(name+'." + prop["name"] + "')\n"
compare_str += TAB2 + "elif self." + prop["name"] + " is None:\n"
compare_str += TAB3 + "pass\n"
compare_str += (
TAB2
+ "elif isinstance(self."
+ prop["name"]
+ ", np.ndarray) and not np.array_equal(other."
+ prop["name"]
+ ", self."
+ prop["name"]
+ "):\n"
)
compare_str += TAB3 + "diff_list.append(name+'." + prop["name"] + "')\n"
compare_str += (
TAB2 + "elif hasattr(self." + prop["name"] + ", 'compare'):\n"
)
compare_str += (
TAB3
+ "diff_list.extend(self."
+ prop["name"]
+ ".compare(other."
+ prop["name"]
+ ",name=name+'."
+ prop["name"]
+ "'))\n"
)
compare_str += (
TAB2
+ "elif other._"
+ prop["name"]
+ " != self._"
+ prop["name"]
+ ":\n"
)
compare_str += TAB3 + "diff_list.append(name+'." + prop["name"] + "')\n"
elif prop["type"] == "[ndarray]":
compare_str += (
TAB2
+ "if (other."
+ prop["name"]
+ " is None and self."
+ prop["name"]
+ " is not None) or (other."
+ prop["name"]
+ " is not None and self."
+ prop["name"]
+ " is None):\n"
)
compare_str += TAB3 + "diff_list.append(name+'." + prop["name"] + "')\n"
compare_str += TAB2 + "elif self." + prop["name"] + " is None:\n"
compare_str += TAB3 + "pass\n"
compare_str += (
TAB2
+ "elif len(other."
+ prop["name"]
+ ") != len(self."
+ prop["name"]
+ "):\n"
)
compare_str += (
TAB3 + "diff_list.append('len('+name+'." + prop["name"] + ")')\n"
)
compare_str += TAB2 + "else:\n"
compare_str += TAB3 + "for ii in range(len(other." + prop["name"] + ")):\n"
compare_str += (
TAB4
+ "if not array_equal(other."
+ prop["name"]
+ "[ii], self."
+ prop["name"]
+ "[ii]):\n"
)
compare_str += (
TAB5 + "diff_list.append(name+'." + prop["name"] + "['+str(ii)+']')\n"
)
elif prop["type"] == "{ndarray}":
compare_str += (
TAB2
+ "if (other."
+ prop["name"]
+ " is None and self."
+ prop["name"]
+ " is not None) or (other."
+ prop["name"]
+ " is not None and self."
+ prop["name"]
+ " is None):\n"
)
compare_str += (
TAB3 + "diff_list.append(name+'." + prop["name"] + " None mismatch')\n"
)
compare_str += TAB2 + "elif self." + prop["name"] + " is None:\n"
compare_str += TAB3 + "pass\n"
compare_str += (
TAB2
+ "elif len(other."
+ prop["name"]
+ ") != len(self."
+ prop["name"]
+ "):\n"
)
compare_str += (
TAB3 + "diff_list.append('len('+name+'." + prop["name"] + ")')\n"
)
compare_str += TAB2 + "else:\n"
compare_str += TAB3 + "for key in other." + prop["name"] + ":\n"
compare_str += (
TAB4
+ "if key not in self."
+ prop["name"]
+ " or not array_equal(other."
+ prop["name"]
+ "[key], self."
+ prop["name"]
+ "[key]):\n"
)
compare_str += (
TAB5 + "diff_list.append(name+'." + prop["name"] + "['+str(key)+']')\n"
)
elif prop["type"] == "function":
compare_str += (
TAB2
+ "if other._"
+ prop["name"]
+ "_str != self._"
+ prop["name"]
+ "_str:\n"
)
compare_str += TAB3 + "diff_list.append(name+'." + prop["name"] + "')\n"
elif prop["type"] in ["str", "int", "float", "bool", "complex", "dict", "list"]:
compare_str += (
TAB2 + "if other._" + prop["name"] + " != self._" + prop["name"] + ":\n"
)
compare_str += TAB3 + "diff_list.append(name+'." + prop["name"] + "')\n"
elif is_list_pyleecan_type(prop["type"]):
compare_str += (
TAB2
+ "if (other."
+ prop["name"]
+ " is None and self."
+ prop["name"]
+ " is not None) or (other."
+ prop["name"]
+ " is not None and self."
+ prop["name"]
+ " is None):\n"
)
compare_str += (
TAB3 + "diff_list.append(name+'." + prop["name"] + " None mismatch')\n"
)
compare_str += TAB2 + "elif self." + prop["name"] + " is None:\n"
compare_str += TAB3 + "pass\n"
compare_str += (
TAB2
+ "elif len(other."
+ prop["name"]
+ ") != len(self."
+ prop["name"]
+ "):\n"
)
compare_str += (
TAB3 + "diff_list.append('len('+name+'." + prop["name"] + ")')\n"
)
compare_str += TAB2 + "else:\n"
compare_str += TAB3 + "for ii in range(len(other." + prop["name"] + ")):\n"
compare_str += (
TAB4
+ "diff_list.extend(self."
+ prop["name"]
+ "[ii].compare(other."
+ prop["name"]
+ "[ii],name=name+'."
+ prop["name"]
+ "['+str(ii)+']'))\n"
)
elif is_dict_pyleecan_type(prop["type"]):
compare_str += (
TAB2
+ "if (other."
+ prop["name"]
+ " is None and self."
+ prop["name"]
+ " is not None) or (other."
+ prop["name"]
+ " is not None and self."
+ prop["name"]
+ " is None):\n"
)
compare_str += (
TAB3 + "diff_list.append(name+'." + prop["name"] + " None mismatch')\n"
)
compare_str += TAB2 + "elif self." + prop["name"] + " is None:\n"
compare_str += TAB3 + "pass\n"
compare_str += (
TAB2
+ "elif len(other."
+ prop["name"]
+ ") != len(self."
+ prop["name"]
+ "):\n"
)
compare_str += (
TAB3 + "diff_list.append('len('+name+'" + prop["name"] + ")')\n"
)
compare_str += TAB2 + "else:\n"
compare_str += TAB3 + "for key in self." + prop["name"] + ":\n"
compare_str += (
TAB4
+ "diff_list.extend(self."
+ prop["name"]
+ "[key].compare(other."
+ prop["name"]
+ "[key],name=name+'."
+ prop["name"]
+ "'))\n"
)
elif "." in prop["type"] and "SciDataTool" not in prop["type"]:
# External type
compare_str += (
TAB2
+ "if (other."
+ prop["name"]
+ " is None and self."
+ prop["name"]
+ " is not None) or (other."
+ prop["name"]
+ " is not None and self."
+ prop["name"]
+ " is None):\n"
)
compare_str += (
TAB3 + "diff_list.append(name+'." + prop["name"] + " None mismatch')\n"
)
compare_str += (
TAB2
+ "elif self."
+ prop["name"]
+ " is not None and self."
+ prop["name"]
+ " != other."
+ prop["name"]
+ ":\n"
)
compare_str += TAB3 + "diff_list.append(name+'." + prop["name"] + "')\n"
else: # pyleecan type
compare_str += (
TAB2
+ "if (other."
+ prop["name"]
+ " is None and self."
+ prop["name"]
+ " is not None) or (other."
+ prop["name"]
+ " is not None and self."
+ prop["name"]
+ " is None):\n"
)
compare_str += (
TAB3 + "diff_list.append(name+'." + prop["name"] + " None mismatch')\n"
)
compare_str += TAB2 + "elif self." + prop["name"] + " is not None:\n"
compare_str += (
TAB3
+ "diff_list.extend(self."
+ prop["name"]
+ ".compare(other."
+ prop["name"]
+ ",name=name+'."
+ prop["name"]
+ "'))\n"
)
compare_str += TAB2 + "# Filter ignore differences\n"
compare_str += (
TAB2 + "diff_list = list(filter(lambda x : x not in ignore_list, diff_list))\n"
)
compare_str += TAB2 + "return diff_list\n"
return compare_str
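# For illustration only (hypothetical class, not part of the generator): for a class with
# a single float property "x" and no mother class, the strings assembled above produce a
# compare() method roughly equivalent to:
#
#     def compare(self, other, name='self', ignore_list=None):
#         """Compare two objects and return list of differences"""
#
#         if ignore_list is None:
#             ignore_list = list()
#         if type(other) != type(self):
#             return ['type('+name+')']
#         diff_list = list()
#         if other._x != self._x:
#             diff_list.append(name+'.x')
#         # Filter ignore differences
#         diff_list = list(filter(lambda x : x not in ignore_list, diff_list))
#         return diff_list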
| 35.679887 | 97 | 0.378722 |