repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k) |
---|---|---|---|---|
xuannianc/keras-retinanet | count_files.py | d1da39592042927aaf3b3eb905a308c327983bed | import csv
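# Count the unique image filenames referenced in each annotations CSV (basename of the first column) and print the count per file.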
vat_filenames = set()
train_csv_filename = 'train_annotations.csv'
val_csv_filename = 'val_annotations.csv'
for csv_filename in [train_csv_filename, val_csv_filename]:
for line in csv.reader(open(csv_filename)):
vat_filename = line[0].split('/')[-1]
vat_filenames.add(vat_filename)
print(len(vat_filenames))
vat_filenames.clear()
| [] |
ngi-nix/liberaforms | liberaforms/views/admin.py | 5882994736292e7ab34c4c9207805b307478a6c7 | """
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2020 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
import os, json
from flask import g, request, render_template, redirect
from flask import session, flash, Blueprint
from flask import send_file, after_this_request
from flask_babel import gettext as _
from liberaforms.models.user import User
from liberaforms.models.form import Form
from liberaforms.models.site import Site
from liberaforms.models.invite import Invite
from liberaforms.utils.wraps import *
from liberaforms.utils import utils
from liberaforms.utils.utils import make_url_for, JsonResponse
from liberaforms.utils.dispatcher import Dispatcher
from liberaforms.utils import wtf
from pprint import pprint
admin_bp = Blueprint('admin_bp', __name__,
template_folder='../templates/admin')
@admin_bp.route('/admin', methods=['GET'])
@admin_required
def site_admin():
return render_template('admin-panel.html',
user=g.current_user,
app_version=utils.get_app_version(),
site=g.site)
""" User management """
@admin_bp.route('/admin/users', methods=['GET'])
@admin_required
def list_users():
return render_template('list-users.html',
users=User.find_all(),
invites=Invite.find_all())
@admin_bp.route('/admin/users/<int:id>', methods=['GET'])
@admin_required
def inspect_user(id):
user=User.find(id=id)
if not user:
flash(_("User not found"), 'warning')
return redirect(make_url_for('admin_bp.list_users'))
return render_template('inspect-user.html', user=user)
@admin_bp.route('/admin/users/toggle-blocked/<int:id>', methods=['POST'])
@admin_required
def toggle_user_blocked(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps({}))
if user.id == g.current_user.id:
# current_user cannot disable themself
blocked=user.blocked
else:
blocked=user.toggle_blocked()
return JsonResponse(json.dumps({'blocked':blocked}))
@admin_bp.route('/admin/users/toggle-admin/<int:id>', methods=['POST'])
@admin_required
def toggle_admin(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps({}))
if user.username == g.current_user.username:
# current_user cannot remove their own admin permission
is_admin=True
else:
is_admin=user.toggle_admin()
return JsonResponse(json.dumps({'admin':is_admin}))
@admin_bp.route('/admin/users/toggle-uploads-enabled/<int:id>', methods=['POST'])
@admin_required
def toggle_uploads_enabled(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps({}))
uploads_enabled=user.toggle_uploads_enabled()
return JsonResponse(json.dumps({'uploads_enabled':uploads_enabled}))
@admin_bp.route('/admin/users/delete/<int:id>', methods=['GET', 'POST'])
@admin_required
def delete_user(id):
user=User.find(id=id)
if not user:
flash(_("User not found"), 'warning')
return redirect(make_url_for('admin_bp.list_users'))
if request.method == 'POST' and 'username' in request.form:
if user.is_root_user():
flash(_("Cannot delete root user"), 'warning')
return redirect(make_url_for('admin_bp.inspect_user', id=user.id))
if user.id == g.current_user.id:
flash(_("Cannot delete yourself"), 'warning')
return redirect(make_url_for('admin_bp.inspect_user',
username=user.username))
if user.username == request.form['username']:
user.delete_user()
flash(_("Deleted user '%s'" % (user.username)), 'success')
return redirect(make_url_for('admin_bp.list_users'))
else:
flash(_("Username does not match"), 'warning')
return render_template('delete-user.html', user=user)
@admin_bp.route('/admin/users/csv', methods=['GET'])
@admin_required
def csv_users():
csv_file = g.site.write_users_csv()
@after_this_request
def remove_file(response):
os.remove(csv_file)
return response
return send_file(csv_file, mimetype="text/csv", as_attachment=True)
""" Form management """
@admin_bp.route('/admin/forms', methods=['GET'])
@admin_required
def list_forms():
return render_template('list-forms.html', forms=Form.find_all())
@admin_bp.route('/admin/forms/toggle-public/<int:id>', methods=['GET'])
@admin_required
def toggle_form_public_admin_prefs(id):
queriedForm = Form.find(id=id)
if not queriedForm:
flash(_("Can't find that form"), 'warning')
return redirect(make_url_for('form_bp.my_forms'))
queriedForm.toggle_admin_form_public()
return redirect(make_url_for('form_bp.inspect_form', form_id=id))
""" Invitations """
@admin_bp.route('/admin/invites', methods=['GET'])
@admin_required
def list_invites():
return render_template('list-invites.html', invites=Invite.find_all())
@admin_bp.route('/admin/invites/new', methods=['GET', 'POST'])
@admin_required
def new_invite():
wtform=wtf.NewInvite()
if wtform.validate_on_submit():
message=wtform.message.data
token = utils.create_token(Invite)
#pprint(token)
new_invite=Invite( email=wtform.email.data,
message=message,
token=token,
admin=wtform.admin.data)
new_invite.save()
status = Dispatcher().send_invitation(new_invite)
if status['email_sent'] == True:
flash_text = _("We have sent an invitation to %s" % new_invite.email)
flash(flash_text, 'success')
else:
flash(status['msg'], 'warning')
return redirect(make_url_for('admin_bp.list_invites'))
wtform.message.data=Invite.default_message()
return render_template('new-invite.html',
wtform=wtform,
total_invites=Invite.find_all().count())
@admin_bp.route('/admin/invites/delete/<int:id>', methods=['GET'])
@admin_required
def delete_invite(id):
invite=Invite.find(id=id)
if invite:
invite.delete()
# i18n: Invitation to [email protected] deleted OK
flash(_("Invitation to %s deleted OK" % invite.email), 'success')
else:
flash(_("Opps! We can't find that invitation"), 'error')
return redirect(make_url_for('admin_bp.list_invites'))
""" Personal Admin preferences """
@admin_bp.route('/admin/toggle-newuser-notification', methods=['POST'])
@admin_required
def toggle_newUser_notification():
return json.dumps({'notify': g.current_user.toggle_new_user_notification()})
@admin_bp.route('/admin/toggle-newform-notification', methods=['POST'])
@admin_required
def toggle_newForm_notification():
return json.dumps({'notify': g.current_user.toggle_new_form_notification()})
""" ROOT_USERS functions
"""
@admin_bp.route('/admin/forms/change-author/<int:form_id>', methods=['GET', 'POST'])
@rootuser_required
def change_author(form_id):
queriedForm = Form.find(id=form_id)
if not queriedForm:
flash(_("Can't find that form"), 'warning')
return redirect(make_url_for('user_bp.my_forms'))
if request.method == 'POST':
author = queriedForm.author
if not ('old_author_username' in request.form and \
request.form['old_author_username']==author.username):
flash(_("Current author incorrect"), 'warning')
return render_template('change-author.html', form=queriedForm)
if 'new_author_username' in request.form:
new_author=User.find(username=request.form['new_author_username'])
if new_author:
if new_author.enabled:
old_author=author
if queriedForm.change_author(new_author):
log_text = _("Changed author from %s to %s" % (
old_author.username,
new_author.username))
queriedForm.add_log(log_text)
flash(_("Changed author OK"), 'success')
return redirect(make_url_for('form_bp.inspect_form',
form_id=queriedForm.id))
else:
flash(_("Cannot use %s. The user is not enabled" % (
request.form['new_author_username']),
), 'warning')
else:
flash(_("Can't find username %s" % (
request.form['new_author_username'])
), 'warning')
return render_template('change-author.html', form=queriedForm)
| [((26, 11, 27, 57), 'flask.Blueprint', 'Blueprint', (), '', False, 'from flask import session, flash, Blueprint\n'), ((50, 9, 50, 25), 'liberaforms.models.user.User.find', 'User.find', (), '', False, 'from liberaforms.models.user import User\n'), ((54, 11, 54, 58), 'flask.render_template', 'render_template', (), '', False, 'from flask import g, request, render_template, redirect\n'), ((60, 9, 60, 25), 'liberaforms.models.user.User.find', 'User.find', (), '', False, 'from liberaforms.models.user import User\n'), ((74, 9, 74, 25), 'liberaforms.models.user.User.find', 'User.find', (), '', False, 'from liberaforms.models.user import User\n'), ((87, 9, 87, 25), 'liberaforms.models.user.User.find', 'User.find', (), '', False, 'from liberaforms.models.user import User\n'), ((96, 9, 96, 25), 'liberaforms.models.user.User.find', 'User.find', (), '', False, 'from liberaforms.models.user import User\n'), ((115, 11, 115, 57), 'flask.render_template', 'render_template', (), '', False, 'from flask import g, request, render_template, redirect\n'), ((121, 15, 121, 39), 'flask.g.site.write_users_csv', 'g.site.write_users_csv', ({}, {}), '()', False, 'from flask import g, request, render_template, redirect\n'), ((126, 11, 126, 71), 'flask.send_file', 'send_file', (), '', False, 'from flask import send_file, after_this_request\n'), ((140, 18, 140, 34), 'liberaforms.models.form.Form.find', 'Form.find', (), '', False, 'from liberaforms.models.form import Form\n'), ((159, 11, 159, 26), 'liberaforms.utils.wtf.NewInvite', 'wtf.NewInvite', ({}, {}), '()', False, 'from liberaforms.utils import wtf\n'), ((176, 24, 176, 48), 'liberaforms.models.invite.Invite.default_message', 'Invite.default_message', ({}, {}), '()', False, 'from liberaforms.models.invite import Invite\n'), ((185, 11, 185, 29), 'liberaforms.models.invite.Invite.find', 'Invite.find', (), '', False, 'from liberaforms.models.invite import Invite\n'), ((215, 18, 215, 39), 'liberaforms.models.form.Form.find', 'Form.find', (), '', False, 'from liberaforms.models.form import Form\n'), ((246, 11, 246, 66), 'flask.render_template', 'render_template', (), '', False, 'from flask import g, request, render_template, redirect\n'), ((68, 24, 68, 55), 'json.dumps', 'json.dumps', ({(68, 35, 68, 54): "{'blocked': blocked}"}, {}), "({'blocked': blocked})", False, 'import os, json\n'), ((82, 24, 82, 54), 'json.dumps', 'json.dumps', ({(82, 35, 82, 53): "{'admin': is_admin}"}, {}), "({'admin': is_admin})", False, 'import os, json\n'), ((91, 24, 91, 71), 'json.dumps', 'json.dumps', ({(91, 35, 91, 70): "{'uploads_enabled': uploads_enabled}"}, {}), "({'uploads_enabled': uploads_enabled})", False, 'import os, json\n'), ((124, 8, 124, 27), 'os.remove', 'os.remove', ({(124, 18, 124, 26): 'csv_file'}, {}), '(csv_file)', False, 'import os, json\n'), ((145, 20, 145, 68), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (), '', False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((162, 16, 162, 42), 'liberaforms.utils.utils.create_token', 'utils.create_token', ({(162, 35, 162, 41): 'Invite'}, {}), '(Invite)', False, 'from liberaforms.utils import utils\n'), ((164, 19, 167, 52), 'liberaforms.models.invite.Invite', 'Invite', (), '', False, 'from liberaforms.models.invite import Invite\n'), ((192, 20, 192, 57), 'liberaforms.utils.utils.make_url_for', 'make_url_for', ({(192, 33, 192, 56): '"""admin_bp.list_invites"""'}, {}), "('admin_bp.list_invites')", False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((35, 40, 35, 63), 
'liberaforms.utils.utils.get_app_version', 'utils.get_app_version', ({}, {}), '()', False, 'from liberaforms.utils import utils\n'), ((44, 34, 44, 49), 'liberaforms.models.user.User.find_all', 'User.find_all', ({}, {}), '()', False, 'from liberaforms.models.user import User\n'), ((45, 36, 45, 53), 'liberaforms.models.invite.Invite.find_all', 'Invite.find_all', ({}, {}), '()', False, 'from liberaforms.models.invite import Invite\n'), ((52, 14, 52, 33), 'flask_babel.gettext', '_', ({(52, 16, 52, 32): '"""User not found"""'}, {}), "('User not found')", True, 'from flask_babel import gettext as _\n'), ((53, 24, 53, 59), 'liberaforms.utils.utils.make_url_for', 'make_url_for', ({(53, 37, 53, 58): '"""admin_bp.list_users"""'}, {}), "('admin_bp.list_users')", False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((62, 28, 62, 40), 'json.dumps', 'json.dumps', ({}, {}), '()', False, 'import os, json\n'), ((76, 28, 76, 40), 'json.dumps', 'json.dumps', ({}, {}), '()', False, 'import os, json\n'), ((89, 28, 89, 40), 'json.dumps', 'json.dumps', ({}, {}), '()', False, 'import os, json\n'), ((98, 14, 98, 33), 'flask_babel.gettext', '_', ({(98, 16, 98, 32): '"""User not found"""'}, {}), "('User not found')", True, 'from flask_babel import gettext as _\n'), ((99, 24, 99, 59), 'liberaforms.utils.utils.make_url_for', 'make_url_for', ({(99, 37, 99, 58): '"""admin_bp.list_users"""'}, {}), "('admin_bp.list_users')", False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((134, 52, 134, 67), 'liberaforms.models.form.Form.find_all', 'Form.find_all', ({}, {}), '()', False, 'from liberaforms.models.form import Form\n'), ((142, 14, 142, 39), 'flask_babel.gettext', '_', ({(142, 16, 142, 38): '"""Can\'t find that form"""'}, {}), '("Can\'t find that form")', True, 'from flask_babel import gettext as _\n'), ((143, 24, 143, 56), 'liberaforms.utils.utils.make_url_for', 'make_url_for', ({(143, 37, 143, 55): '"""form_bp.my_forms"""'}, {}), "('form_bp.my_forms')", False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((153, 56, 153, 73), 'liberaforms.models.invite.Invite.find_all', 'Invite.find_all', ({}, {}), '()', False, 'from liberaforms.models.invite import Invite\n'), ((171, 25, 171, 81), 'flask_babel.gettext', '_', ({(171, 27, 171, 80): "'We have sent an invitation to %s' % new_invite.email"}, {}), "('We have sent an invitation to %s' % new_invite.email)", True, 'from flask_babel import gettext as _\n'), ((172, 12, 172, 40), 'flask.flash', 'flash', ({(172, 18, 172, 28): 'flash_text', (172, 30, 172, 39): '"""success"""'}, {}), "(flash_text, 'success')", False, 'from flask import session, flash, Blueprint\n'), ((174, 12, 174, 43), 'flask.flash', 'flash', ({(174, 18, 174, 31): "status['msg']", (174, 33, 174, 42): '"""warning"""'}, {}), "(status['msg'], 'warning')", False, 'from flask import session, flash, Blueprint\n'), ((175, 24, 175, 61), 'liberaforms.utils.utils.make_url_for', 'make_url_for', ({(175, 37, 175, 60): '"""admin_bp.list_invites"""'}, {}), "('admin_bp.list_invites')", False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((189, 14, 189, 61), 'flask_babel.gettext', '_', ({(189, 16, 189, 60): "('Invitation to %s deleted OK' % invite.email)"}, {}), "('Invitation to %s deleted OK' % invite.email)", True, 'from flask_babel import gettext as _\n'), ((191, 14, 191, 54), 'flask_babel.gettext', '_', ({(191, 16, 191, 53): '"""Opps! We can\'t find that invitation"""'}, {}), '("Opps! 
We can\'t find that invitation")', True, 'from flask_babel import gettext as _\n'), ((200, 33, 200, 78), 'flask.g.current_user.toggle_new_user_notification', 'g.current_user.toggle_new_user_notification', ({}, {}), '()', False, 'from flask import g, request, render_template, redirect\n'), ((206, 33, 206, 78), 'flask.g.current_user.toggle_new_form_notification', 'g.current_user.toggle_new_form_notification', ({}, {}), '()', False, 'from flask import g, request, render_template, redirect\n'), ((217, 14, 217, 39), 'flask_babel.gettext', '_', ({(217, 16, 217, 38): '"""Can\'t find that form"""'}, {}), '("Can\'t find that form")', True, 'from flask_babel import gettext as _\n'), ((218, 24, 218, 56), 'liberaforms.utils.utils.make_url_for', 'make_url_for', ({(218, 37, 218, 55): '"""user_bp.my_forms"""'}, {}), "('user_bp.my_forms')", False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((224, 19, 224, 74), 'flask.render_template', 'render_template', (), '', False, 'from flask import g, request, render_template, redirect\n'), ((226, 23, 226, 78), 'liberaforms.models.user.User.find', 'User.find', (), '', False, 'from liberaforms.models.user import User\n'), ((103, 18, 103, 46), 'flask_babel.gettext', '_', ({(103, 20, 103, 45): '"""Cannot delete root user"""'}, {}), "('Cannot delete root user')", True, 'from flask_babel import gettext as _\n'), ((104, 28, 104, 77), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (), '', False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((106, 18, 106, 45), 'flask_babel.gettext', '_', ({(106, 20, 106, 44): '"""Cannot delete yourself"""'}, {}), "('Cannot delete yourself')", True, 'from flask_babel import gettext as _\n'), ((107, 28, 108, 64), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (), '', False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((111, 18, 111, 58), 'flask_babel.gettext', '_', ({(111, 20, 111, 57): '("Deleted user \'%s\'" % user.username)'}, {}), '("Deleted user \'%s\'" % user.username)', True, 'from flask_babel import gettext as _\n'), ((112, 28, 112, 63), 'liberaforms.utils.utils.make_url_for', 'make_url_for', ({(112, 41, 112, 62): '"""admin_bp.list_users"""'}, {}), "('admin_bp.list_users')", False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n'), ((114, 18, 114, 46), 'flask_babel.gettext', '_', ({(114, 20, 114, 45): '"""Username does not match"""'}, {}), "('Username does not match')", True, 'from flask_babel import gettext as _\n'), ((169, 17, 169, 29), 'liberaforms.utils.dispatcher.Dispatcher', 'Dispatcher', ({}, {}), '()', False, 'from liberaforms.utils.dispatcher import Dispatcher\n'), ((223, 18, 223, 47), 'flask_babel.gettext', '_', ({(223, 20, 223, 46): '"""Current author incorrect"""'}, {}), "('Current author incorrect')", True, 'from flask_babel import gettext as _\n'), ((179, 42, 179, 59), 'liberaforms.models.invite.Invite.find_all', 'Invite.find_all', ({}, {}), '()', False, 'from liberaforms.models.invite import Invite\n'), ((243, 22, 245, 22), 'flask_babel.gettext', '_', ({(243, 24, 244, 68): '("Can\'t find username %s" % request.form[\'new_author_username\'])'}, {}), '("Can\'t find username %s" % request.form[\'new_author_username\'])', True, 'from flask_babel import gettext as _\n'), ((231, 35, 233, 77), 'flask_babel.gettext', '_', ({(231, 37, 233, 76): "'Changed author from %s to %s' % (old_author.username, new_author.username)"}, {}), "('Changed author from %s to %s' % (old_author.username, new_author.username))", True, 'from 
flask_babel import gettext as _\n'), ((239, 26, 241, 26), 'flask_babel.gettext', '_', ({(239, 28, 240, 72): "('Cannot use %s. The user is not enabled' % request.form['new_author_username']\n )"}, {}), "('Cannot use %s. The user is not enabled' % request.form[\n 'new_author_username'])", True, 'from flask_babel import gettext as _\n'), ((235, 30, 235, 52), 'flask_babel.gettext', '_', ({(235, 32, 235, 51): '"""Changed author OK"""'}, {}), "('Changed author OK')", True, 'from flask_babel import gettext as _\n'), ((236, 40, 237, 76), 'liberaforms.utils.utils.make_url_for', 'make_url_for', (), '', False, 'from liberaforms.utils.utils import make_url_for, JsonResponse\n')] |
r-pad/zephyr | python/zephyr/datasets/score_dataset.py | c8f45e207c11bfc2b21df169db65a7df892d2848 | import os, copy
import cv2
from functools import partial
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
from zephyr.data_util import to_np, vectorize, img2uint8
from zephyr.utils import torch_norm_fast
from zephyr.utils.mask_edge import getRendEdgeScore
from zephyr.utils.edges import generate_distance_image
from zephyr.normals import compute_normals
from zephyr.utils.timer import TorchTimer
try:
from zephyr.datasets.bop_raw_dataset import BopRawDataset
except ImportError:
pass
from zephyr.datasets.prep_dataset import PrepDataset
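# Aggregated-feature indices ranked by importance; when args.top_n_feat is set, the first N entries become selected_features.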
IMPORTANCE_ORDER = [
28, 27, 32, 33, 36, 35, 29, 16, 26, 22, 13, 4, 26, 21, 22
]
class ScoreDataset(Dataset):
def __init__(self, datapoints, dataset_root, dataset_name, args, mode='train', timing = False):
self.args = args
self.datapoints = datapoints
self.dataset_root = dataset_root
self.dataset_name = dataset_name
self.mode = mode
self.return_full_data = False
self.feature_size = args.feature_size
self.norm_cos_weight = args.norm_cos_weight
self.top_n_feat = args.top_n_feat
self.max_hypos = args.max_hypos
self.ppf_only = args.ppf_only
self.n_ppf_hypos = args.n_ppf_hypos
self.n_sift_hypos = args.n_sift_hypos
self.use_mask_test = args.use_mask_test
if args.raw_bop_dataset:
self.loader = BopRawDataset(
args.bop_root, self.dataset_name, args.split, args.split_name, args.ppf_result_file, no_sift=args.ppf_only, no_ppf=args.sift_only
)
else:
self.loader = PrepDataset(
self.dataset_root, self.feature_size
)
self.dim_point = 0
self.dim_render = 0
self.dim_agg = 0
# About timing
self.timing = timing
self.timing_list = []
if args.model_name == "maskconv":
print("Using Point Render dataset")
self.return_rend, self.return_points, self.return_agg = True, True, False
else:
self.return_rend = False
if args.dataset == "feat":
print("Using Agg Dataset")
self.return_points, self.return_agg = False, True
else: # Use PointNet dataset
if "mix" in args.dataset:
print("Using Mix Dataset")
self.return_points, self.return_agg = True, True
else:
print("Using PointNet Dataset")
self.return_points, self.return_agg = True, False
'''For aggregated features Data'''
if self.return_agg:
self.std = None
self.mean = None
self.feature_inliers = True
self.use_hsv = True
self.normalize = True
self.fs_thresh = 0.02
if args.selected_features is not None:
self.selected_features = args.selected_features
print("Using feature indices:", self.selected_features)
elif self.top_n_feat is not None:
self.selected_features = IMPORTANCE_ORDER[:self.top_n_feat]
print("ScoreDataset: Using top features N =", self.top_n_feat)
print("Using feature indices:", self.selected_features)
args.selected_features = self.selected_features
else:
self.selected_features = list(range(39))
print("Using all aggregated features")
args.selected_features = self.selected_features
self.dim_agg = len(self.selected_features)
self.vectorize = partial(vectorize,
use_hsv=self.use_hsv,
feature_inliers=self.feature_inliers,
norm_cos_weight=self.norm_cos_weight,
fs_thresh=self.fs_thresh
)
self.agg_cache = [None for _ in range(len(self.datapoints))]
'''For PointNet Data'''
self.point_x_labels = []
if self.return_points:
self.max_points = args.max_points
args.xyz_channel = [] # indices of point_x channels that define coordinates
args.model_channel = [] # indices of point_x channels that are specific to the object model
'''Mask channel'''
num_features = 0
# valid_proj.unsqueeze(-1).float(),
# valid_depth.unsqueeze(-1).float(),
if not self.args.no_valid_proj:
self.point_x_labels += ['valid_proj']
num_features += 1
if not self.args.no_valid_depth:
self.point_x_labels += ["valid_depth"]
num_features += 1
'''XYZ channel'''
self.uvd, self.uv = False, False
if "uvd" in args.dataset:
self.uvd = True
args.xyz_channel = list(range(num_features, num_features + 3))
num_features +=3
self.point_x_labels += ['u', 'v', 'd']
elif "uv" in args.dataset:
self.uv = True
args.xyz_channel = list(range(num_features, num_features + 2))
num_features += 2
self.point_x_labels += ['u', 'v']
else:
num_features += 0
args.model_channel += args.xyz_channel
num_non_data = num_features
'''Data channel'''
if "cos" in args.dataset:
self.point_x_labels += ['cam_norm_cos']
self.RGB, self.HSV, self.D, self.diff, self.cos, self.edge, self.ppfscore, self.norm_cos = \
False, False, False, False, False, False, False, False
if "RGB" in args.dataset:
self.RGB, self.HSV = True, False
args.model_channel += list(range(num_features, num_features + 3))
num_features += 6
self.point_x_labels += ['R_diff', 'G_diff', 'B_diff'] if "diff" in args.dataset else ["R1", "G1", "B1", "R2", "G2", "B2"]
elif "HSV" in args.dataset:
self.RGB, self.HSV = True, True
args.model_channel += list(range(num_features, num_features + 3))
num_features += 6
self.point_x_labels += ['H_diff', 'S_diff', 'V_diff'] if "diff" in args.dataset else ["H1", "S1", "V1", "H2", "S2", "V2"]
if "D" in args.dataset:
self.D = True
args.model_channel += list(range(num_features, num_features + 1))
num_features += 2
self.point_x_labels += ["D_diff"] if "diff" in args.dataset else ["D1", "D2"]
if "diff" in args.dataset:
self.diff = True
num_features = num_non_data + (num_features-num_non_data) // 2
if "cos" in args.dataset:
self.cos = True
num_features += 1
if "edge" in args.dataset:
self.edge = True
self.edgecos = "edgecos" in args.dataset
self.edgexnor = "edgexnor" in args.dataset
num_features += 1 if (self.edgecos or self.edgexnor) else 2
if self.edgecos:
self.point_x_labels += ['obs_edge_score']
elif self.edgexnor:
self.point_x_labels += ['edge_xnor']
else:
self.point_x_labels += ['obs_edge_score', "rend_edge_score"]
if "ppfscore" in args.dataset:
self.ppfscore = True
num_features += 1
self.point_x_labels += ['ppf_score']
if "norm" in args.dataset:
self.norm_cos = True
num_features += 1
self.point_x_labels += ['norm_cos']
self.seg_mask = False
if "seg" in args.dataset:
self.seg_mask = True
num_features += 1
self.point_x_labels += ['mask', "mask_edge"]
self.dim_point = num_features
'''Train/Test specific config'''
if self.mode == 'train':
print("Initializating training dataset", self.point_x_labels)
self.cojitter = args.cojitter
self.drop_ratio = args.drop_ratio
self.uv_rot = args.uv_rot
else:
print("Initializating %s dataset" % mode, self.point_x_labels)
self.cojitter = False
self.drop_ratio = 0
self.uv_rot = False
self.transform = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05),
torchvision.transforms.ToTensor(),
])
if self.cojitter:
self.transform_cojitter = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
torchvision.transforms.ToTensor(),
])
print("ScorePointnetDataset: Using cojitter")
if self.return_rend:
self.dim_render = self.dim_point - 1
def __len__(self):
return len(self.datapoints)
def setNormalization(self, var, mean):
var = torch.from_numpy(np.asarray(var))
mean = torch.from_numpy(np.asarray(mean))
self.std = torch.sqrt(var[self.selected_features]).float()
self.mean = mean[self.selected_features].float()
'''Return [n_hypo, n_features]'''
def getAggData(self, data):
x = self.vectorize(data)
x = x[:, self.selected_features]
if self.normalize:
x = (x-self.mean)/self.std
return x
'''Return [n_hypo, n_points, n_features]'''
def getPointNetData(self, data, return_uv_original=False):
with TorchTimer("Data convert 1", agg_list=self.timing_list, timing = self.timing, verbose=False):
img = data['img'].float() # float [0, 1]
depth = data['depth'].float()
if "pbr" in self.dataset_root and self.mode == "train":
# print("blur depth image")
depth = depth * (torch.ones_like(depth) + 0.003 * torch.randn_like(depth))
transforms = data['transforms'].float()
model_points = data['model_points'].float()
model_colors = data['model_colors'].float() # float [0, 1]
model_normals = data['model_normals'].float()
meta_data = data['meta_data']
with TorchTimer("Transform and project", agg_list=self.timing_list, timing = self.timing, verbose=False):
# Transform and project point cloud
trans_pts = torch.einsum('ijk,mk->imj', transforms[:,:3,:3], model_points) + transforms[:,:3,3].unsqueeze(1)
f_cam = torch.tensor([meta_data['camera_fx'], meta_data['camera_fy']])
c_cam = torch.tensor([meta_data['camera_cx'], meta_data['camera_cy']])
proj_pts = trans_pts[:,:,:2]/trans_pts[:,:,2:]*f_cam + c_cam
uv = proj_pts.long()
invalid_proj = (uv[:,:,1]>=img.shape[0]) + (uv[:,:,1]<0) \
+ (uv[:,:,0]>=img.shape[1]) + (uv[:,:,0]< 0)
uv[invalid_proj] = 0
# Projected depth
proj_depth = trans_pts[:,:,-1]
'''Jitter the color as data augmentation'''
if self.mode == "train":
img = img.permute(2, 0, 1) # (H, W, C) to (C, H, W)
img = self.transform(img)
img = img.permute(1, 2, 0) # (C, H, W) to (H, W, C)
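# cojitter: apply the same strong color jitter to the observed image and the model colors together, so both are perturbed consistently.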
if self.cojitter:
H, W, C = img.shape # (H, W, C)
N, _ = model_colors.shape
data_cojitter = torch.cat([
img.reshape((1, -1, 3)),
model_colors.reshape((1, -1, 3))
], dim=1)
data_cojitter = data_cojitter.permute(2, 0, 1)
cojittered = self.transform_cojitter(data_cojitter)
cojittered = cojittered.permute(1, 2, 0)
img = cojittered[0, :H*W, :].reshape((H, W, C))
model_colors = cojittered[0, H*W:, :].reshape((N, C))
# RGB to HSV
with TorchTimer("RGB to HSV", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.HSV:
with np.errstate(divide='ignore'):
img_rgb = img2uint8(to_np(img))
# img_hsv = rgb2hsv(img_rgb) # this will convert it to range [0, 1]
img_hsv = cv2.cvtColor(img_rgb,cv2.COLOR_RGB2HSV)
img_hsv = img_hsv.astype(float) / 255.0
img = torch.from_numpy(img_hsv).to(img.device).float()
model_colors_rgb = img2uint8(np.expand_dims(to_np(model_colors), 0))
# model_colors_hsv = rgb2hsv(model_colors_rgb)[0]
model_colors_hsv = cv2.cvtColor(model_colors_rgb,cv2.COLOR_RGB2HSV)[0]
model_colors_hsv = model_colors_hsv.astype(float) / 255.0
model_colors = torch.from_numpy(model_colors_hsv).to(model_colors.device).float()
# Sample the observed HSVD
with TorchTimer("Sample obvervation", agg_list=self.timing_list, timing = self.timing, verbose=False):
obs_color = img[uv[:,:,1], uv[:,:,0], :]
obs_depth = depth[uv[:,:,1], uv[:,:,0]]
with TorchTimer("Hypo Pruning", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.args.inconst_ratio_th is not None and self.mode == "test":
d_diff = proj_depth - obs_depth
n_points = model_points.shape[0]
invalid_count = (d_diff < -0.02).sum(-1).float()
invalid_ratio = invalid_count / n_points
th = self.args.inconst_ratio_th
idx = invalid_ratio < (th/100.0)
idx[-1] = True
# At least preserve some non-oracle hypos
if idx.sum() == 1:
idx[0] = True
pruning_mask = idx
transforms = transforms[idx]
trans_pts = trans_pts[idx]
obs_color = obs_color[idx]
obs_depth = obs_depth[idx]
uv = uv[idx]
invalid_proj = invalid_proj[idx]
proj_depth = proj_depth[idx]
self.SelectDataByIdx(data, idx)
uv_original = copy.deepcopy(uv)
data['uv_original'] = uv_original
# Transform normals
with TorchTimer("Transform and project 2", agg_list=self.timing_list, timing = self.timing, verbose=False):
trans_norms = torch.einsum('ijk,mk->imj', transforms[:,:3,:3], model_normals)
cam_norm_cos = (- trans_pts * trans_norms).sum(-1) / (torch_norm_fast(trans_pts, -1) * torch_norm_fast(trans_norms, -1))
valid_norm = cam_norm_cos > 0
valid_proj = valid_norm * torch.bitwise_not(invalid_proj)
data['valid_proj'] = valid_proj
# x = []
x = model_points.new_empty((len(transforms), len(model_points), self.dim_point))
idx_feat = 0
with TorchTimer("Valid proj/depth", agg_list=self.timing_list, timing = self.timing, verbose=False):
valid_depth = obs_depth > 0
'''Mask channel'''
if not self.args.no_valid_proj:
# x += [valid_proj.unsqueeze(-1).float()]
x[:, :, idx_feat] = valid_proj.float()
idx_feat += 1
if not self.args.no_valid_depth:
# x += [valid_depth.unsqueeze(-1).float()]
x[:, :, idx_feat] = valid_depth.float()
idx_feat += 1
'''XYZ channel'''
with TorchTimer("Normalize uv", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.uv or self.uvd:
uv = uv.float()
uv_mean = uv.mean(dim=1, keepdim=True)
uv_std = uv.std(dim=1, keepdim=True)
uv = (uv - uv_mean) / uv_std
if self.uv_rot:
n_hypo, n_point, n_coord = uv.shape
'''random flip'''
flip_mat = torch.rand((n_hypo, 1, n_coord)) > 0.5
flip_mat = (flip_mat.type(uv.dtype) - 0.5) * 2
uv = uv * flip_mat
'''random rotation'''
rot_mat = torch.rand((n_hypo, 1, 1)) * 2 * np.pi
rot_mat = torch.cat([
torch.cos(rot_mat), -torch.sin(rot_mat),
torch.sin(rot_mat), torch.cos(rot_mat)
], 2).reshape((-1, 1, 2, 2))
uv = uv.unsqueeze(-1)
uv = torch.matmul(rot_mat, uv)
uv = uv.squeeze()
# x += [uv]
x[:, :, idx_feat:idx_feat+2] = uv
idx_feat += 2
if self.uvd:
d_diff = proj_depth.unsqueeze(-1) - obs_depth.unsqueeze(-1)
d_diff = (d_diff - d_diff.mean(dim=1, keepdim=True)) / d_diff.std(dim=1, keepdim=True)
# x += [d_diff]
x[:, :, idx_feat:idx_feat+1] = d_diff
idx_feat += 1
'''Point data channel'''
if self.cos:
# x += [cam_norm_cos.unsqueeze(-1).float()]
x[:, :, idx_feat] = cam_norm_cos.float()
idx_feat += 1
with TorchTimer("Compute RGBD/HSVD diff", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.RGB or self.HSV:
if self.diff:
color_diff = model_colors.unsqueeze(0).expand(obs_color.shape) - obs_color
if self.HSV:
color_diff[:,:,0] = color_diff[:,:,0].abs()
color_diff[:,:,0] = np.minimum(color_diff[:,:,0], 1-color_diff[:,:,0])
# x += [color_diff]
x[:, :, idx_feat:idx_feat+3] = color_diff
idx_feat += 3
else:
# x += [model_colors.unsqueeze(0).expand(obs_color.shape), obs_color]
x[:, :, idx_feat:idx_feat+3] = model_colors.unsqueeze(0).expand(obs_color.shape)
idx_feat += 3
x[:, :, idx_feat:idx_feat+3] = obs_color
idx_feat += 3
if self.D:
if self.diff:
# x += [proj_depth.unsqueeze(-1) - obs_depth.unsqueeze(-1)]
x[:, :, idx_feat] = proj_depth - obs_depth
idx_feat += 1
else:
# x += [proj_depth.unsqueeze(-1), obs_depth.unsqueeze(-1)]
x[:, :, idx_feat] = proj_depth
idx_feat += 1
x[:, :, idx_feat] = obs_depth
idx_feat += 1
'''Edge channel'''
with TorchTimer("Edge", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.edge:
'''Observed edges'''
if "depth_for_edge" in data:
depth_for_edge = data['depth_for_edge']
# print("Using depth_for_edge", depth_for_edge.min(), depth_for_edge.max())
else:
depth_for_edge = depth
with TorchTimer("generate_distance_image", agg_list=self.timing_list, timing = self.timing, verbose=False):
edge_obs = generate_distance_image(depth_for_edge, canny_l=20, canny_h=50)[0,0]
with TorchTimer("Edge sampling", agg_list=self.timing_list, timing = self.timing, verbose=False):
uv = copy.deepcopy(uv_original) # Re-fetch the uv as it is changed before
edge_score_obs = edge_obs[uv[:,:,1], uv[:,:,0]]
edge_score_obs = torch.exp(-edge_score_obs / 24)
'''Projected edges'''
with TorchTimer("getRendEdgeScore", agg_list=self.timing_list, timing = self.timing, verbose=False):
if "edge_score_rend" in data:
edge_score_rend = data['edge_score_rend']
else:
with torch.no_grad():
edge_score_rend = getRendEdgeScore(img.to(self.args.edge_gpu), uv_original.to(self.args.edge_gpu)).to(uv_original.device)
'''Normalized edge scores'''
edge_score_rend = edge_score_rend / edge_score_rend.max(1, keepdim=True)[0]
# edge_score_obs = torch.exp(-edge_score_obs / )
if self.edgexnor:
edge_score = edge_score_rend * edge_score_obs + (1 - edge_score_rend) * (1 - edge_score_obs)
# x += [edge_score.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score
idx_feat += 1
elif self.edgecos:
# x += [edge_score_obs.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score_obs
idx_feat += 1
else:
# x += [edge_score_obs.unsqueeze(-1)]
# x += [edge_score_rend.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score_obs
idx_feat += 1
x[:, :, idx_feat] = edge_score_rend
idx_feat += 1
if self.args.camera_scale is not None:
meta_data['camera_scale'] = self.args.camera_scale
'''Use the cos of the angle between observed and rendered normal vectors'''
with TorchTimer("Normal vector", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.norm_cos:
norm_downsample = self.args.norm_downsample
uv = uv_original # Re-fetch the uv as it is changed before
normals = compute_normals(to_np(depth)[::norm_downsample, ::norm_downsample].astype(np.double), meta_data = meta_data)
normals = torch.from_numpy(normals).float()
scene_normals_proj = normals[uv[:,:,1]//norm_downsample, uv[:,:,0]//norm_downsample]
model_normals_proj = trans_norms
norm_cos = (scene_normals_proj * model_normals_proj).sum(dim=-1) / (torch_norm_fast(scene_normals_proj, -1) * torch_norm_fast(model_normals_proj, -1))
norm_cos[norm_cos != norm_cos] = 0
# x += [norm_cos.unsqueeze(-1).float()]
x[:, :, idx_feat] = norm_cos.float()
idx_feat += 1
# with TorchTimer("torch.cat()", agg_list=self.timing_list, timing = self.timing, verbose=False):
# x = torch.cat(x, dim=-1)
# print(x.shape)
if self.args.hard_mask:
x[~valid_proj.bool()]=0
'''Sample the points'''
if self.drop_ratio >= 0 and self.mode == 'train':
n_hypo = x.shape[0]
n_point = x.shape[1]
n_point_kept = int((1.0-self.drop_ratio) * n_point)
if self.max_points is not None and n_point_kept > self.max_points:
n_point_kept = self.max_points
idx = []
for i in range(n_hypo):
idx.append(torch.randperm(n_point)[:n_point_kept].unsqueeze(0))
idx = torch.cat(idx, dim=0)
x = x[torch.arange(n_hypo).unsqueeze(1).expand(n_hypo, n_point_kept), idx]
uv_sampled = uv_original[torch.arange(n_hypo).unsqueeze(1).expand(n_hypo, n_point_kept), idx]
else:
uv_sampled = uv_original
if return_uv_original:
return x, uv_sampled
else:
return x
def getPointRenderData(self, data):
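# Scatter the per-point features of each hypothesis into a fixed-size crop_size x crop_size feature map; points are placed by their normalized projected uv, channel 0 (the validity mask) gates which points are written, and the remaining channels hold the data.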
point_x, uv = self.getPointNetData(data, True)
crop_size = 96
pad_size = 2
n_hypo = uv.shape[0]
n_point = uv.shape[1]
span_min = pad_size
span_max = crop_size - pad_size
mask_index = [0]
# data_index = [0, 1] + list(range(4, point_x.shape[2]))
data_index = list(range(point_x.shape[2]))
n_feat = len(data_index)
point_mask = point_x[:, :, mask_index].bool()
point_data = point_x[:, :, data_index]
uv = uv.float()
uv_max = uv.max(dim=1, keepdim=True)[0]
uv_min = uv.min(dim=1, keepdim=True)[0]
uv_center = (uv_max + uv_min) / 2.0
uv_radius = (uv_max - uv_min).max(-1, True)[0] / 2.0
uv_norm = (uv - uv_center) / uv_radius # range in [-1, 1]
uv_resize = (uv_norm + 1) / 2 * (span_max - span_min) + span_min
uv_resize = uv_resize.long()
u = uv_resize[:, :, 0]
v = uv_resize[:, :, 1]
feature_map = torch.zeros(n_hypo, n_feat, crop_size, crop_size)
t = torch.arange(n_hypo).view(-1,1).repeat(1, n_point)
u = u.reshape(-1)[point_mask.view(-1)]
v = v.reshape(-1)[point_mask.view(-1)]
t = t.view(-1)[point_mask.view(-1)]
feature_map[t.view(-1), :, v.view(-1), u.view(-1)] = point_data.view(-1, n_feat)[point_mask.view(-1)]
mask_map = feature_map[:, 0:1, :, :]
data_map = feature_map[:, 1:, :, :]
return mask_map, data_map
def SelectDataByIdx(self, data, idx):
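# Keep only the hypotheses selected by idx: their poses, pose errors, and (if present) cached rendered edge scores.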
data['transforms'] = data['transforms'][idx]
data['pp_err'] = data['pp_err'][idx]
if "edge_score_rend" in data:
data['edge_score_rend'] = data['edge_score_rend'][idx]
return data
def __getitem__(self, idx):
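# Load one datapoint, optionally restrict the hypothesis set (PPF-only / SIFT-only), and assemble the aggregated and/or point-wise features requested by the dataset flags.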
dp = self.datapoints[idx]
to_return = {"object_id": dp[0], "scene_id": dp[1], "im_id": dp[2]}
obj_id = dp[0]
scene_id = dp[1]
im_id = dp[2]
'''If only used aggregated features, return the cached one'''
if self.return_agg and not self.return_points and self.agg_cache[idx] is not None:
to_return['agg_x'], to_return['pp_err'], to_return['transforms'] = self.agg_cache[idx]
return to_return
# data = loadData(*dp, feature_size = self.feature_size, base_path = self.dataset_root)
# '''Get the model data and send it into the processing function'''
# model_data = self.getModelData(dp[0])
# data.update(model_data)
data = self.loader.loadData(*dp)
assert len(data['pp_err']) == 101 or len(data['pp_err']) == 1101 or len(data['pp_err']) == 301
assert not (self.args.ppf_only and self.args.sift_only)
if self.args.ppf_only:
assert len(data['pp_err']) >= self.args.n_ppf_hypos + 1
idx = list(np.arange(self.args.n_ppf_hypos)) + [-1]
self.SelectDataByIdx(data, idx)
if self.args.sift_only:
assert len(data['pp_err']) >= self.args.n_ppf_hypos + self.args.n_sift_hypos + 1
idx = list(range(self.n_ppf_hypos, self.n_ppf_hypos+self.n_sift_hypos)) + [-1]
data = self.SelectDataByIdx(data, idx)
'''Sample the hypotheses'''
point_x = self.getPointNetData(data)
n_hypo = len(point_x)
to_return['object_id'] = to_return['object_id'].repeat(n_hypo)
to_return['scene_id'] = to_return['scene_id'].repeat(n_hypo)
to_return['im_id'] = to_return['im_id'].repeat(n_hypo)
to_return['pp_err'] = data['pp_err'].reshape(-1)
to_return['transforms'] = data['transforms']
if self.return_agg:
to_return['agg_x'] = self.getAggData(data)
self.agg_cache[idx] = (to_return['agg_x'], to_return['pp_err'], to_return['transforms'])
if self.return_points:
if self.return_rend:
to_return['rend_mask'], to_return['x_rend'] = self.getPointRenderData(data)
to_return['mask_x'] = to_return['rend_mask']
to_return['rend_x'] = to_return['x_rend']
else:
to_return['point_x'] = point_x
# print("to_return['pp_err']", to_return['pp_err'])
# print("to_return['pp_err']", to_return['pp_err'].shape)
# print("to_return['transforms']", to_return['transforms'].shape)
# print("to_return['point_x']", to_return['point_x'].shape)
to_return['dataset_i'] = 0
# For ICP post-processing
to_return['depth'] = data['depth']
to_return['meta_data'] = data['meta_data']
to_return['uv_original'] = data['uv_original']
to_return['model_points'] = data['model_points']
return to_return
| [((578, 22, 578, 71), 'torch.zeros', 'torch.zeros', ({(578, 34, 578, 40): 'n_hypo', (578, 42, 578, 48): 'n_feat', (578, 50, 578, 59): 'crop_size', (578, 61, 578, 70): 'crop_size'}, {}), '(n_hypo, n_feat, crop_size, crop_size)', False, 'import torch\n'), ((46, 26, 48, 17), 'zephyr.datasets.bop_raw_dataset.BopRawDataset', 'BopRawDataset', (), '', False, 'from zephyr.datasets.bop_raw_dataset import BopRawDataset\n'), ((50, 26, 52, 13), 'zephyr.datasets.prep_dataset.PrepDataset', 'PrepDataset', ({(51, 16, 51, 33): 'self.dataset_root', (51, 35, 51, 52): 'self.feature_size'}, {}), '(self.dataset_root, self.feature_size)', False, 'from zephyr.datasets.prep_dataset import PrepDataset\n'), ((103, 29, 108, 17), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((239, 31, 239, 46), 'numpy.asarray', 'np.asarray', ({(239, 42, 239, 45): 'var'}, {}), '(var)', True, 'import numpy as np\n'), ((240, 32, 240, 48), 'numpy.asarray', 'np.asarray', ({(240, 43, 240, 47): 'mean'}, {}), '(mean)', True, 'import numpy as np\n'), ((256, 13, 256, 105), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((270, 13, 270, 112), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((273, 20, 273, 82), 'torch.tensor', 'torch.tensor', ({(273, 33, 273, 81): "[meta_data['camera_fx'], meta_data['camera_fy']]"}, {}), "([meta_data['camera_fx'], meta_data['camera_fy']])", False, 'import torch\n'), ((274, 20, 274, 82), 'torch.tensor', 'torch.tensor', ({(274, 33, 274, 81): "[meta_data['camera_cx'], meta_data['camera_cy']]"}, {}), "([meta_data['camera_cx'], meta_data['camera_cy']])", False, 'import torch\n'), ((306, 13, 306, 101), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((321, 13, 321, 109), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((325, 13, 325, 103), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((351, 26, 351, 43), 'copy.deepcopy', 'copy.deepcopy', ({(351, 40, 351, 42): 'uv'}, {}), '(uv)', False, 'import os, copy\n'), ((355, 13, 355, 114), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((356, 26, 356, 89), 'torch.einsum', 'torch.einsum', ({(356, 39, 356, 52): '"""ijk,mk->imj"""', (356, 54, 356, 73): 'transforms[:, :3, :3]', (356, 75, 356, 88): 'model_normals'}, {}), "('ijk,mk->imj', transforms[:, :3, :3], model_normals)", False, 'import torch\n'), ((367, 13, 367, 107), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((380, 13, 380, 103), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((420, 13, 420, 113), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((450, 13, 450, 95), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((501, 13, 501, 104), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((534, 18, 534, 39), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((241, 19, 241, 58), 'torch.sqrt', 'torch.sqrt', ({(241, 30, 241, 57): 'var[self.selected_features]'}, {}), '(var[self.selected_features])', False, 
'import torch\n'), ((272, 24, 272, 86), 'torch.einsum', 'torch.einsum', ({(272, 37, 272, 50): '"""ijk,mk->imj"""', (272, 52, 272, 71): 'transforms[:, :3, :3]', (272, 73, 272, 85): 'model_points'}, {}), "('ijk,mk->imj', transforms[:, :3, :3], model_points)", False, 'import torch\n'), ((359, 38, 359, 69), 'torch.bitwise_not', 'torch.bitwise_not', ({(359, 56, 359, 68): 'invalid_proj'}, {}), '(invalid_proj)', False, 'import torch\n'), ((220, 16, 220, 51), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ({}, {}), '()', False, 'import torchvision\n'), ((221, 16, 221, 106), 'torchvision.transforms.ColorJitter', 'torchvision.transforms.ColorJitter', (), '', False, 'import torchvision\n'), ((222, 16, 222, 49), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ({}, {}), '()', False, 'import torchvision\n'), ((308, 21, 308, 49), 'numpy.errstate', 'np.errstate', (), '', True, 'import numpy as np\n'), ((311, 30, 311, 69), 'cv2.cvtColor', 'cv2.cvtColor', ({(311, 43, 311, 50): 'img_rgb', (311, 51, 311, 68): 'cv2.COLOR_RGB2HSV'}, {}), '(img_rgb, cv2.COLOR_RGB2HSV)', False, 'import cv2\n'), ((357, 66, 357, 96), 'zephyr.utils.torch_norm_fast', 'torch_norm_fast', ({(357, 82, 357, 91): 'trans_pts', (357, 93, 357, 95): '(-1)'}, {}), '(trans_pts, -1)', False, 'from zephyr.utils import torch_norm_fast\n'), ((357, 99, 357, 131), 'zephyr.utils.torch_norm_fast', 'torch_norm_fast', ({(357, 115, 357, 126): 'trans_norms', (357, 128, 357, 130): '(-1)'}, {}), '(trans_norms, -1)', False, 'from zephyr.utils import torch_norm_fast\n'), ((401, 25, 401, 50), 'torch.matmul', 'torch.matmul', ({(401, 38, 401, 45): 'rot_mat', (401, 47, 401, 49): 'uv'}, {}), '(rot_mat, uv)', False, 'import torch\n'), ((459, 21, 459, 122), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((462, 21, 462, 112), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((463, 25, 463, 51), 'copy.deepcopy', 'copy.deepcopy', ({(463, 39, 463, 50): 'uv_original'}, {}), '(uv_original)', False, 'import os, copy\n'), ((465, 37, 465, 68), 'torch.exp', 'torch.exp', ({(465, 47, 465, 67): '-edge_score_obs / 24'}, {}), '(-edge_score_obs / 24)', False, 'import torch\n'), ((469, 21, 469, 115), 'zephyr.utils.timer.TorchTimer', 'TorchTimer', (), '', False, 'from zephyr.utils.timer import TorchTimer\n'), ((622, 23, 622, 55), 'numpy.arange', 'np.arange', ({(622, 33, 622, 54): 'self.args.n_ppf_hypos'}, {}), '(self.args.n_ppf_hypos)', True, 'import numpy as np\n'), ((226, 20, 226, 55), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ({}, {}), '()', False, 'import torchvision\n'), ((227, 20, 227, 109), 'torchvision.transforms.ColorJitter', 'torchvision.transforms.ColorJitter', (), '', False, 'import torchvision\n'), ((228, 20, 228, 53), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ({}, {}), '()', False, 'import torchvision\n'), ((262, 33, 262, 55), 'torch.ones_like', 'torch.ones_like', ({(262, 49, 262, 54): 'depth'}, {}), '(depth)', False, 'import torch\n'), ((309, 40, 309, 50), 'zephyr.data_util.to_np', 'to_np', ({(309, 46, 309, 49): 'img'}, {}), '(img)', False, 'from zephyr.data_util import to_np, vectorize, img2uint8\n'), ((316, 39, 316, 87), 'cv2.cvtColor', 'cv2.cvtColor', ({(316, 52, 316, 68): 'model_colors_rgb', (316, 69, 316, 86): 'cv2.COLOR_RGB2HSV'}, {}), '(model_colors_rgb, cv2.COLOR_RGB2HSV)', False, 'import cv2\n'), ((390, 31, 390, 63), 'torch.rand', 
'torch.rand', ({(390, 42, 390, 62): '(n_hypo, 1, n_coord)'}, {}), '((n_hypo, 1, n_coord))', False, 'import torch\n'), ((426, 44, 426, 94), 'numpy.minimum', 'np.minimum', ({(426, 55, 426, 72): 'color_diff[:, :, (0)]', (426, 74, 426, 93): '1 - color_diff[:, :, (0)]'}, {}), '(color_diff[:, :, (0)], 1 - color_diff[:, :, (0)])', True, 'import numpy as np\n'), ((460, 31, 460, 94), 'zephyr.utils.edges.generate_distance_image', 'generate_distance_image', (), '', False, 'from zephyr.utils.edges import generate_distance_image\n'), ((506, 26, 506, 51), 'torch.from_numpy', 'torch.from_numpy', ({(506, 43, 506, 50): 'normals'}, {}), '(normals)', False, 'import torch\n'), ((509, 84, 509, 123), 'zephyr.utils.torch_norm_fast', 'torch_norm_fast', ({(509, 100, 509, 118): 'scene_normals_proj', (509, 120, 509, 122): '(-1)'}, {}), '(scene_normals_proj, -1)', False, 'from zephyr.utils import torch_norm_fast\n'), ((509, 126, 509, 165), 'zephyr.utils.torch_norm_fast', 'torch_norm_fast', ({(509, 142, 509, 160): 'model_normals_proj', (509, 162, 509, 164): '(-1)'}, {}), '(model_normals_proj, -1)', False, 'from zephyr.utils import torch_norm_fast\n'), ((579, 12, 579, 32), 'torch.arange', 'torch.arange', ({(579, 25, 579, 31): 'n_hypo'}, {}), '(n_hypo)', False, 'import torch\n'), ((262, 66, 262, 89), 'torch.randn_like', 'torch.randn_like', ({(262, 83, 262, 88): 'depth'}, {}), '(depth)', False, 'import torch\n'), ((314, 64, 314, 83), 'zephyr.data_util.to_np', 'to_np', ({(314, 70, 314, 82): 'model_colors'}, {}), '(model_colors)', False, 'from zephyr.data_util import to_np, vectorize, img2uint8\n'), ((395, 30, 395, 56), 'torch.rand', 'torch.rand', ({(395, 41, 395, 55): '(n_hypo, 1, 1)'}, {}), '((n_hypo, 1, 1))', False, 'import torch\n'), ((473, 29, 473, 44), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((505, 42, 505, 54), 'zephyr.data_util.to_np', 'to_np', ({(505, 48, 505, 53): 'depth'}, {}), '(depth)', False, 'from zephyr.data_util import to_np, vectorize, img2uint8\n'), ((533, 27, 533, 50), 'torch.randperm', 'torch.randperm', ({(533, 42, 533, 49): 'n_point'}, {}), '(n_point)', False, 'import torch\n'), ((313, 26, 313, 51), 'torch.from_numpy', 'torch.from_numpy', ({(313, 43, 313, 50): 'img_hsv'}, {}), '(img_hsv)', False, 'import torch\n'), ((318, 35, 318, 69), 'torch.from_numpy', 'torch.from_numpy', ({(318, 52, 318, 68): 'model_colors_hsv'}, {}), '(model_colors_hsv)', False, 'import torch\n'), ((397, 24, 397, 42), 'torch.cos', 'torch.cos', ({(397, 34, 397, 41): 'rot_mat'}, {}), '(rot_mat)', False, 'import torch\n'), ((398, 24, 398, 42), 'torch.sin', 'torch.sin', ({(398, 34, 398, 41): 'rot_mat'}, {}), '(rot_mat)', False, 'import torch\n'), ((398, 44, 398, 62), 'torch.cos', 'torch.cos', ({(398, 54, 398, 61): 'rot_mat'}, {}), '(rot_mat)', False, 'import torch\n'), ((397, 45, 397, 63), 'torch.sin', 'torch.sin', ({(397, 55, 397, 62): 'rot_mat'}, {}), '(rot_mat)', False, 'import torch\n'), ((535, 18, 535, 38), 'torch.arange', 'torch.arange', ({(535, 31, 535, 37): 'n_hypo'}, {}), '(n_hypo)', False, 'import torch\n'), ((536, 37, 536, 57), 'torch.arange', 'torch.arange', ({(536, 50, 536, 56): 'n_hypo'}, {}), '(n_hypo)', False, 'import torch\n')] |
GuilhermeEsdras/Grafos | em Python/Roteiro7/Roteiro7__testes_dijkstra.py | b6556c3d679496d576f65b798a1a584cd73e40f4 | from Roteiro7.Roteiro7__funcoes import GrafoComPesos
# .:: Dijkstra Algorithm Test File ::. #
# --------------------------------------------------------------------------- #
grafo_aula = GrafoComPesos(
['E', 'A', 'B', 'C', 'D'],
{
'E-A': 1,
'E-C': 10,
'A-B': 2,
'B-C': 4,
'C-D': 3
}
)
print(grafo_aula)
print('Menor caminho por Dijkstra: ', grafo_aula.dijkstra('E', 'D'))
print("-------------------------")
grafo_aula2 = GrafoComPesos(
['A', 'B', 'C', 'D', 'E', 'F', 'G'],
{
'A-B': 1, 'A-F': 3, 'A-G': 2,
'B-F': 1,
'C-B': 2,
'C-D': 5,
'D-E': 2,
'F-D': 4,
'F-G': 2,
'G-E': 7,
}
)
print(grafo_aula2)
print('Menor caminho por Dijkstra: ', grafo_aula2.dijkstra('A', 'E'))
| [((6, 13, 15, 1), 'Roteiro7.Roteiro7__funcoes.GrafoComPesos', 'GrafoComPesos', ({(7, 4, 7, 29): "['E', 'A', 'B', 'C', 'D']", (8, 4, 14, 5): "{'E-A': 1, 'E-C': 10, 'A-B': 2, 'B-C': 4, 'C-D': 3}"}, {}), "(['E', 'A', 'B', 'C', 'D'], {'E-A': 1, 'E-C': 10, 'A-B': 2,\n 'B-C': 4, 'C-D': 3})", False, 'from Roteiro7.Roteiro7__funcoes import GrafoComPesos\n'), ((21, 14, 33, 1), 'Roteiro7.Roteiro7__funcoes.GrafoComPesos', 'GrafoComPesos', ({(22, 4, 22, 39): "['A', 'B', 'C', 'D', 'E', 'F', 'G']", (23, 4, 32, 5): "{'A-B': 1, 'A-F': 3, 'A-G': 2, 'B-F': 1, 'C-B': 2, 'C-D': 5, 'D-E': 2,\n 'F-D': 4, 'F-G': 2, 'G-E': 7}"}, {}), "(['A', 'B', 'C', 'D', 'E', 'F', 'G'], {'A-B': 1, 'A-F': 3,\n 'A-G': 2, 'B-F': 1, 'C-B': 2, 'C-D': 5, 'D-E': 2, 'F-D': 4, 'F-G': 2,\n 'G-E': 7})", False, 'from Roteiro7.Roteiro7__funcoes import GrafoComPesos\n')] |
awinia-github/QScreenCast | QScreenCast/spyder/api.py | 09d343cae0a1c7f86faf28e08a62bd09976aaf2e | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © Tom Hören
# Licensed under the terms of the MIT License
# ----------------------------------------------------------------------------
"""
Python QtScreenCaster Spyder API.
"""
class ScreenResolutions:
Screen1080x1020 = '1080x1020'
| [] |
aaron19950321/ICOM | setup.py | d5bd0705776c505dd1df0a1c76a07fee2d218394 | import os, os.path
import subprocess
from distutils.core import setup
from py2exe.build_exe import py2exe
PROGRAM_NAME = 'icom_app'
PROGRAM_DESC = 'simple icom app'
NSIS_SCRIPT_TEMPLATE = r"""
!define py2exeOutputDirectory '{output_dir}\'
!define exe '{program_name}.exe'
; Uses solid LZMA compression. Can be slow, use discretion.
SetCompressor /SOLID lzma
; Sets the title bar text (although NSIS seems to append "Installer")
Caption "{program_desc}"
Name '{program_name}'
OutFile ${{exe}}
Icon '{icon_location}'
; Use XPs styles where appropriate
XPStyle on
; You can opt for a silent install, but if your packaged app takes a long time
; to extract, users might get confused. The method used here is to show a dialog
; box with a progress bar as the installer unpacks the data.
;SilentInstall silent
AutoCloseWindow true
ShowInstDetails nevershow
Section
DetailPrint "Extracting application..."
SetDetailsPrint none
InitPluginsDir
SetOutPath '$PLUGINSDIR'
File /r '${{py2exeOutputDirectory}}\*'
GetTempFileName $0
;DetailPrint $0
Delete $0
StrCpy $0 '$0.bat'
FileOpen $1 $0 'w'
FileWrite $1 '@echo off$\r$\n'
StrCpy $2 $TEMP 2
FileWrite $1 '$2$\r$\n'
FileWrite $1 'cd $PLUGINSDIR$\r$\n'
FileWrite $1 '${{exe}}$\r$\n'
FileClose $1
; Hide the window just before the real app launches. Otherwise you have two
; programs with the same icon hanging around, and it's confusing.
HideWindow
nsExec::Exec $0
Delete $0
SectionEnd
"""
class NSISScript(object):
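# Writes an NSIS script around the py2exe output and compiles it with makensis, producing a self-extracting installer that unpacks to a temp directory and launches the app from there.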
NSIS_COMPILE = "makensis"
def __init__(self, program_name, program_desc, dist_dir, icon_loc):
self.program_name = program_name
self.program_desc = program_desc
self.dist_dir = dist_dir
self.icon_loc = icon_loc
self.pathname = "setup_%s.nsi" % self.program_name
def create(self):
contents = NSIS_SCRIPT_TEMPLATE.format(
program_name = self.program_name,
program_desc = self.program_desc,
output_dir = self.dist_dir,
icon_location = os.path.join(self.dist_dir, self.icon_loc))
with open(self.pathname, "w") as outfile:
outfile.write(contents)
def compile(self):
subproc = subprocess.Popen(
# "/P5" uses realtime priority for the LZMA compression stage.
# This can get annoying though.
[self.NSIS_COMPILE, self.pathname, "/P5"], env=os.environ)
subproc.communicate()
retcode = subproc.returncode
if retcode:
raise RuntimeError("NSIS compilation return code: %d" % retcode)
class build_installer(py2exe):
# This class first builds the exe file(s), then creates an NSIS installer
# that runs your program from a temporary directory.
def run(self):
# First, let py2exe do it's work.
py2exe.run(self)
lib_dir = self.lib_dir
dist_dir = self.dist_dir
# Create the installer, using the files py2exe has created.
script = NSISScript(PROGRAM_NAME,
PROGRAM_DESC,
dist_dir,
os.path.join('.', 'icon.ico'))
print "*** creating the NSIS setup script***"
script.create()
print "*** compiling the NSIS setup script***"
script.compile()
zipfile = r"lib\shardlib"
setup(
name = 'MyApp',
description = 'My Application',
version = '1.0',
windows = [
{
'script': os.path.join('.','ICOM.py'),
'icon_resources': [(1, os.path.join('.', 'icom.ico'))],
'dest_base': PROGRAM_NAME,
},
],
options = {
'py2exe': {
# Py2exe options...
"optimize": 2
}
},
zipfile = zipfile,
data_files = [],# etc...
cmdclass = {"py2exe": build_installer},
) | [] |
CharlottePouw/interpreting-complexity | src/lingcomp/farm/features.py | b9a73c0aff18e4c6b4209a6511d00639494c70da | import torch
from farm.data_handler.samples import Sample
from farm.modeling.prediction_head import RegressionHead
class FeaturesEmbeddingSample(Sample):
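# A FARM Sample that additionally carries a precomputed feature embedding (stored as feats_embed).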
def __init__(self, id, clear_text, tokenized=None, features=None, feat_embeds=None):
super().__init__(id, clear_text, tokenized, features)
self.feats_embed = feat_embeds
class FeaturesRegressionHead(RegressionHead):
"""A regression head mixing [CLS] representation
and explicit features for prediction"""
def forward(self, x, feats, **kwargs):
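        # x is the pooled [CLS] representation coming from the language model;
        # feats holds the precomputed explicit features, concatenated along dim 1.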
x = torch.cat((x, feats), 1)
logits = self.feed_forward(x)
return logits
| [((17, 12, 17, 36), 'torch.cat', 'torch.cat', ({(17, 22, 17, 32): '(x, feats)', (17, 34, 17, 35): '1'}, {}), '((x, feats), 1)', False, 'import torch\n')] |
UN-ICC/icc-digital-id-manager | manager/tests/api_view_test_classes.py | aca0109b3202b292145326ec5523ee8f24691a83 | import pytest
from rest_framework import status
from rest_framework.test import APIClient
class TestBase:
__test__ = False
path = None
get_data = {}
put_data = {}
post_data = {}
delete_data = {}
requires_auth = True
implements_retrieve = False
implements_create = False
implements_update = False
implements_destroy = False
client = APIClient()
@pytest.fixture
def setup(self, setup_method=None):
return setup_method
@pytest.fixture
def authenticate(self, api_client_admin):
self.client = api_client_admin
class TestGet(TestBase):
@pytest.fixture
def get_response(self):
return self.client.get(f"/{self.path}", self.get_data, format="json",)
def test_get_without_authentication(self, setup, get_response):
if not self.requires_auth:
if not self.implements_retrieve:
returns_status_code_http_405_not_allowed(get_response)
else:
returns_status_code_http_200_ok(get_response)
response_has_etag(get_response)
else:
returns_status_code_http_401_unauthorized(get_response)
def test_get_with_authentication(self, setup, authenticate, get_response):
if not self.implements_retrieve:
returns_status_code_http_405_not_allowed(get_response)
else:
returns_status_code_http_200_ok(get_response)
response_has_etag(get_response)
class TestPost(TestBase):
@pytest.fixture
def post_response(self):
return self.client.post(
path=f"/{self.path}", data=self.post_data, format="json",
)
def test_post_without_authentication(self, setup, post_response):
returns_status_code_http_401_unauthorized(post_response)
def test_post_with_authentication(self, setup, authenticate, post_response):
if self.implements_create:
returns_status_code_http_201_created(post_response)
else:
returns_status_code_http_405_not_allowed(post_response)
class TestPut(TestBase):
@pytest.fixture
def put_response(self):
return self.client.put(f"/{self.path}", self.put_data, format="json",)
def test_put_without_authentication(self, setup, put_response):
if not self.requires_auth:
if self.implements_update:
returns_status_code_http_200_ok(put_response)
else:
returns_status_code_http_405_not_allowed(put_response)
else:
returns_status_code_http_401_unauthorized(put_response)
def test_put_with_authentication(self, setup, authenticate, put_response):
if not self.implements_update:
returns_status_code_http_405_not_allowed(put_response)
elif self.requires_auth:
returns_status_code_http_200_ok(put_response)
else:
returns_status_code_http_401_unauthorized(put_response)
class TestDelete(TestBase):
@pytest.fixture
def delete_response(self):
return self.client.delete(f"/{self.path}", self.delete_data, format="json")
def test_delete_without_authentication(self, setup, delete_response):
if not self.requires_auth:
if self.implements_destroy:
returns_status_code_http_204_no_content(delete_response)
else:
returns_status_code_http_405_not_allowed(delete_response)
else:
returns_status_code_http_401_unauthorized(delete_response)
def test_delete_with_authentication(self, setup, authenticate, delete_response):
if not self.implements_destroy:
returns_status_code_http_405_not_allowed(delete_response)
elif self.requires_auth:
returns_status_code_http_204_no_content(delete_response)
else:
returns_status_code_http_401_unauthorized(delete_response)
class TestView(TestGet, TestPost, TestPut, TestDelete):
__test__ = False
requires_auth = True
class TestListCreateAPIView(TestView):
__test__ = False
implements_retrieve = True
implements_create = True
requires_auth = True
class TestRetrieveAPIView(TestView):
__test__ = False
implements_retrieve = True
requires_auth = True
class TestUnauthenticatedRetrieveAPIView(TestView):
__test__ = False
implements_retrieve = True
requires_auth = False
def returns_status_code_http_200_ok(response):
assert response.status_code == status.HTTP_200_OK
def returns_status_code_http_401_unauthorized(response):
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def returns_status_code_http_201_created(response):
assert response.status_code == status.HTTP_201_CREATED
def returns_status_code_http_204_no_content(response):
assert response.status_code == status.HTTP_204_NO_CONTENT
def returns_status_code_http_405_not_allowed(response):
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def response_has_etag(response):
assert response.get("ETag")
| [((19, 13, 19, 24), 'rest_framework.test.APIClient', 'APIClient', ({}, {}), '()', False, 'from rest_framework.test import APIClient\n')] |
TrustyJAID/Toxic-Cogs | dashboard/dashboard.py | 870d92067ba2a99b9ade2f957f945b95fdbc80f7 | from collections import defaultdict
import discord
from redbot.core import Config, checks, commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import box, humanize_list, inline
from abc import ABC
# ABC Mixins
from dashboard.abc.abc import MixinMeta
from dashboard.abc.mixin import DBMixin, dashboard
# Command Mixins
from dashboard.abc.roles import DashboardRolesMixin
from dashboard.abc.webserver import DashboardWebserverMixin
from dashboard.abc.settings import DashboardSettingsMixin
# RPC Mixins
from dashboard.baserpc import HUMANIZED_PERMISSIONS, DashboardRPC
from dashboard.menus import ClientList, ClientMenu
THEME_COLORS = ["red", "primary", "blue", "green", "greener", "yellow"]
class CompositeMetaClass(type(commands.Cog), type(ABC)):
"""This allows the metaclass used for proper type detection to coexist with discord.py's
metaclass."""
# Thanks to Flare for showing how to use group commands across multiple files. If this breaks, it's his fault
class Dashboard(
DashboardRolesMixin,
DashboardWebserverMixin,
DashboardSettingsMixin,
DBMixin,
commands.Cog,
metaclass=CompositeMetaClass,
):
__version__ = "0.1.6a"
def __init__(self, bot: Red, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bot = bot
self.config = Config.get_conf(self, identifier=473541068378341376)
self.config.register_global(
secret="[Not set]",
redirect="http://127.0.0.1:42356/callback",
clientid=0,
blacklisted=[],
disallowedperms=[],
support="",
defaultcolor="red",
meta={"title": "", "icon": "", "description": "", "color": ""},
)
self.config.register_guild(roles=[])
self.configcache = defaultdict(self.cache_defaults)
self.rpc = DashboardRPC(self)
def cog_unload(self):
self.configcache.clear()
self.rpc.unload()
def cache_defaults(self):
return {"roles": []}
async def initialize(self):
config = await self.config.all_guilds()
for k, v in config.items():
self.configcache[k] = v
| [((46, 22, 46, 74), 'redbot.core.Config.get_conf', 'Config.get_conf', (), '', False, 'from redbot.core import Config, checks, commands\n'), ((58, 27, 58, 59), 'collections.defaultdict', 'defaultdict', ({(58, 39, 58, 58): 'self.cache_defaults'}, {}), '(self.cache_defaults)', False, 'from collections import defaultdict\n'), ((60, 19, 60, 37), 'dashboard.baserpc.DashboardRPC', 'DashboardRPC', ({(60, 32, 60, 36): 'self'}, {}), '(self)', False, 'from dashboard.baserpc import HUMANIZED_PERMISSIONS, DashboardRPC\n')] |
hopeness/leetcode | algorithms/162.Find-Peak-Element/Python/solution_2.py | 496455fa967f0704d729b4014f92f52b1d69d690 | """
https://leetcode.com/problems/find-peak-element/submissions/
"""
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
l, r = 0, len(nums)-1
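        # Binary search: the side of the larger neighbour is guaranteed to contain
        # a peak, so the interval [l, r] always keeps at least one peak inside it.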
while l < r:
lmid = (l + r) // 2
rmid = lmid + 1
if nums[lmid] < nums[rmid]:
l = lmid + 1
else:
r = rmid - 1
return l
| [] |
vinbigdata-medical/MIDL2021-Xray-Classification | data_loader.py | 51359126d07573053059c36e3cd95a7fd7100e0e | from torchvision.datasets import ImageFolder
from torchvision import transforms
import random
import os
import torch
from torch.utils.data.dataloader import DataLoader
from utils import constants, get_default_device
from image_folder_with_path import ImageFolderWithPaths
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
""" wrap a Dataloader to move data to a device """
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
""" yield a batch of data after moving it to device """
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
""" return number of batch size """
return len(self.dl)
default_device = get_default_device.default_device
train_transforms = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(degrees=random.uniform(5, 10)),
transforms.Resize((512, 512)),
transforms.ToTensor(),
])
test_transforms = transforms.Compose([
transforms.Resize((512, 512)),
transforms.ToTensor(),
])
classes = os.listdir(constants.DATA_PATH + constants.TRAIN_PATH)
training_dataset = ImageFolder(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
valid_dataset = ImageFolder(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
# testing_dataset = ImageFolder(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
# training_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
# valid_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
testing_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
torch.manual_seed(constants.RANDOM_SEED)
train_dl = DataLoader(training_dataset, constants.BATCH_SIZE, shuffle=True, num_workers=8, pin_memory=True)
val_dl = DataLoader(valid_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
test_dl = DataLoader(testing_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
"""
Now we can wrap our training and validation data loaders using DeviceDataLoader for automatically transferring batches
of data to GPU (if available), and use to_device to move our model to GPU (if available)
"""
train_dl = DeviceDataLoader(train_dl, default_device)
val_dl = DeviceDataLoader(val_dl, default_device)
test_dl = DeviceDataLoader(test_dl, default_device) | [((48, 10, 48, 64), 'os.listdir', 'os.listdir', ({(48, 21, 48, 63): 'constants.DATA_PATH + constants.TRAIN_PATH'}, {}), '(constants.DATA_PATH + constants.TRAIN_PATH)', False, 'import os\n'), ((50, 19, 50, 102), 'torchvision.datasets.ImageFolder', 'ImageFolder', (), '', False, 'from torchvision.datasets import ImageFolder\n'), ((51, 16, 51, 96), 'torchvision.datasets.ImageFolder', 'ImageFolder', (), '', False, 'from torchvision.datasets import ImageFolder\n'), ((56, 18, 56, 108), 'image_folder_with_path.ImageFolderWithPaths', 'ImageFolderWithPaths', (), '', False, 'from image_folder_with_path import ImageFolderWithPaths\n'), ((59, 0, 59, 40), 'torch.manual_seed', 'torch.manual_seed', ({(59, 18, 59, 39): 'constants.RANDOM_SEED'}, {}), '(constants.RANDOM_SEED)', False, 'import torch\n'), ((61, 11, 61, 107), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data.dataloader import DataLoader\n'), ((62, 9, 62, 88), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data.dataloader import DataLoader\n'), ((63, 10, 63, 91), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data.dataloader import DataLoader\n'), ((37, 4, 37, 42), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', (), '', False, 'from torchvision import transforms\n'), ((39, 4, 39, 33), 'torchvision.transforms.Resize', 'transforms.Resize', ({(39, 22, 39, 32): '(512, 512)'}, {}), '((512, 512))', False, 'from torchvision import transforms\n'), ((40, 4, 40, 25), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import transforms\n'), ((44, 4, 44, 33), 'torchvision.transforms.Resize', 'transforms.Resize', ({(44, 22, 44, 32): '(512, 512)'}, {}), '((512, 512))', False, 'from torchvision import transforms\n'), ((45, 4, 45, 25), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import transforms\n'), ((38, 38, 38, 59), 'random.uniform', 'random.uniform', ({(38, 53, 38, 54): '5', (38, 56, 38, 58): '10'}, {}), '(5, 10)', False, 'import random\n')] |
sjpfenninger/calliope | calliope/test/test_analysis.py | a4e49c3b7d37f908bafc84543510eec0b4cf5d9f | # import matplotlib
# matplotlib.use('Qt5Agg') # Prevents `Invalid DISPLAY variable` errors
import pytest
import tempfile
from calliope import Model
from calliope.utils import AttrDict
from calliope import analysis
from . import common
from .common import assert_almost_equal, solver, solver_io
import matplotlib.pyplot as plt
plt.switch_backend('agg') # Prevents `Invalid DISPLAY variable` errors
class TestModel:
@pytest.fixture(scope='module')
def model(self):
locations = """
locations:
1:
techs: ['ccgt', 'demand_power']
override:
ccgt:
constraints:
e_cap.max: 100
demand_power:
constraints:
r: -50
metadata:
map_boundary: [-10, 35, 5, 45]
location_coordinates:
1: [40, -2]
links:
"""
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_t: ['2005-01-01', '2005-01-02']
"""
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(locations.encode('utf-8'))
f.read()
override_dict = AttrDict({
'solver': solver,
'solver_io': solver_io,
})
model = common.simple_model(config_run=config_run,
config_locations=f.name,
override=override_dict)
model.run()
return model
@pytest.fixture(scope='module')
def builtin_model(self):
model = Model()
model.run()
return model
def test_plot_carrier_production(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_carrier_production(model.solution)
def test_plot_timeseries(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_timeseries(model.solution,
model.solution['e'].loc[dict(c='power')].sum(dim='x'),
carrier='power', demand='demand_power')
def test_plot_installed_capacities(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_installed_capacities(model.solution)
def test_plot_transmission(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_transmission(model.solution, map_resolution='c')
def test_get_delivered_cost(self, model):
# TODO this should be tested with a more complex model
assert_almost_equal(analysis.get_delivered_cost(model.solution), 0.1)
def test_get_levelized_cost(self, model):
lcoe = analysis.get_levelized_cost(model.solution)
assert_almost_equal(lcoe.at['ccgt'], 0.1)
def test_get_group_share(self, model):
# TODO this should be tested with a more complex model
share = analysis.get_group_share(model.solution, techs=['ccgt'])
assert share == 1.0
def test_get_unmet_demand_hours(self, builtin_model):
# TODO this should be tested with a more complex model
unmet = analysis.get_unmet_demand_hours(builtin_model.solution)
assert unmet == 1
def test_recompute_levelized_costs(self, model):
# Cost in solution
sol = model.solution
assert_almost_equal(sol['summary'].to_pandas().loc['ccgt', 'levelized_cost_monetary'], 0.1)
# Recomputed cost must be the same
dm = analysis.SolutionModel(model.solution)
recomputed = dm.recompute_levelized_costs('ccgt')
assert_almost_equal(recomputed['total'], 0.1)
def test_recompute_levelized_costs_after_changes(self, model):
# Make changes
dm = analysis.SolutionModel(model.solution)
dm.config_model.techs.ccgt.costs.monetary.e_cap = 50
dm.config_model.techs.ccgt.costs.monetary.om_fuel = 1.0
# Recomputed cost
recomputed = dm.recompute_levelized_costs('ccgt')
assert_almost_equal(recomputed['total'], 1.0, tolerance=0.001)
| [((16, 0, 16, 25), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', ({(16, 19, 16, 24): '"""agg"""'}, {}), "('agg')", True, 'import matplotlib.pyplot as plt\n'), ((20, 5, 20, 35), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((57, 5, 57, 35), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((59, 16, 59, 23), 'calliope.Model', 'Model', ({}, {}), '()', False, 'from calliope import Model\n'), ((65, 8, 65, 56), 'calliope.analysis.plot_carrier_production', 'analysis.plot_carrier_production', ({(65, 41, 65, 55): 'model.solution'}, {}), '(model.solution)', False, 'from calliope import analysis\n'), ((75, 8, 75, 58), 'calliope.analysis.plot_installed_capacities', 'analysis.plot_installed_capacities', ({(75, 43, 75, 57): 'model.solution'}, {}), '(model.solution)', False, 'from calliope import analysis\n'), ((79, 8, 79, 70), 'calliope.analysis.plot_transmission', 'analysis.plot_transmission', (), '', False, 'from calliope import analysis\n'), ((86, 15, 86, 58), 'calliope.analysis.get_levelized_cost', 'analysis.get_levelized_cost', ({(86, 43, 86, 57): 'model.solution'}, {}), '(model.solution)', False, 'from calliope import analysis\n'), ((91, 16, 91, 72), 'calliope.analysis.get_group_share', 'analysis.get_group_share', (), '', False, 'from calliope import analysis\n'), ((96, 16, 96, 71), 'calliope.analysis.get_unmet_demand_hours', 'analysis.get_unmet_demand_hours', ({(96, 48, 96, 70): 'builtin_model.solution'}, {}), '(builtin_model.solution)', False, 'from calliope import analysis\n'), ((104, 13, 104, 51), 'calliope.analysis.SolutionModel', 'analysis.SolutionModel', ({(104, 36, 104, 50): 'model.solution'}, {}), '(model.solution)', False, 'from calliope import analysis\n'), ((110, 13, 110, 51), 'calliope.analysis.SolutionModel', 'analysis.SolutionModel', ({(110, 36, 110, 50): 'model.solution'}, {}), '(model.solution)', False, 'from calliope import analysis\n'), ((44, 13, 44, 54), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (), '', False, 'import tempfile\n'), ((47, 28, 50, 14), 'calliope.utils.AttrDict', 'AttrDict', ({(47, 37, 50, 13): "{'solver': solver, 'solver_io': solver_io}"}, {}), "({'solver': solver, 'solver_io': solver_io})", False, 'from calliope.utils import AttrDict\n'), ((83, 28, 83, 71), 'calliope.analysis.get_delivered_cost', 'analysis.get_delivered_cost', ({(83, 56, 83, 70): 'model.solution'}, {}), '(model.solution)', False, 'from calliope import analysis\n')] |
TzuTingWei/mol | mol/data/reader.py | 9499925443f389d8e960b6d656f2953d21df3e3b | import os
from mol.util import read_xyz
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, 'look_and_say.dat')
with open(filename, 'r') as handle:
look_and_say = handle.read()
def get_molecule(filename):
return read_xyz(os.path.join(dirname, filename + ".xyz"))
| [((5, 11, 5, 52), 'os.path.join', 'os.path.join', ({(5, 24, 5, 31): 'dirname', (5, 33, 5, 51): '"""look_and_say.dat"""'}, {}), "(dirname, 'look_and_say.dat')", False, 'import os\n'), ((4, 26, 4, 51), 'os.path.abspath', 'os.path.abspath', ({(4, 42, 4, 50): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((11, 17, 11, 57), 'os.path.join', 'os.path.join', ({(11, 30, 11, 37): 'dirname', (11, 39, 11, 56): "(filename + '.xyz')"}, {}), "(dirname, filename + '.xyz')", False, 'import os\n')] |
lightsey/cinder | cinder/tests/unit/targets/test_spdknvmf.py | e03d68e42e57a63f8d0f3e177fb4287290612b24 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from unittest import mock
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.targets import spdknvmf as spdknvmf_driver
BDEVS = [{
"num_blocks": 4096000,
"name": "Nvme0n1",
"driver_specific": {
"nvme": {
"trid": {
"trtype": "PCIe",
"traddr": "0000:00:04.0"
},
"ns_data": {
"id": 1
},
"pci_address": "0000:00:04.0",
"vs": {
"nvme_version": "1.1"
},
"ctrlr_data": {
"firmware_revision": "1.0",
"serial_number": "deadbeef",
"oacs": {
"ns_manage": 0,
"security": 0,
"firmware": 0,
"format": 0
},
"vendor_id": "0x8086",
"model_number": "QEMU NVMe Ctrl"
},
"csts": {
"rdy": 1,
"cfs": 0
}
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": True,
"unmap": False,
"read": True,
"write_zeroes": False,
"write": True,
"flush": True,
"nvme_io": True
},
"claimed": False,
"block_size": 512,
"product_name": "NVMe disk",
"aliases": ["Nvme0n1"]
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p0"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p1"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"lvs_test/lvol0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297"
}, {
"num_blocks": 8192,
"uuid": "8dec1964-d533-41df-bea7-40520efdb416",
"aliases": [
"lvs_test/lvol1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": True
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298"
}]
NVMF_SUBSYSTEMS = [{
"listen_addresses": [],
"subtype": "Discovery",
"nqn": "nqn.2014-08.org.nvmexpress.discovery",
"hosts": [],
"allow_any_host": True
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [{
"nqn": "nqn.2016-06.io.spdk:init"
}],
"namespaces": [{
"bdev_name": "Nvme0n1p0",
"nsid": 1,
"name": "Nvme0n1p0"
}],
"allow_any_host": False,
"serial_number": "SPDK00000000000001",
"nqn": "nqn.2016-06.io.spdk:cnode1"
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": "SPDK00000000000002",
"nqn": "nqn.2016-06.io.spdk:cnode2"
}]
class JSONRPCException(Exception):
def __init__(self, message):
self.message = message
class JSONRPCClient(object):
def __init__(self, addr=None, port=None):
self.methods = {"bdev_get_bdevs": self.get_bdevs,
"construct_nvmf_subsystem":
self.construct_nvmf_subsystem,
"nvmf_delete_subsystem": self.delete_nvmf_subsystem,
"nvmf_create_subsystem": self.nvmf_subsystem_create,
"nvmf_subsystem_add_listener":
self.nvmf_subsystem_add_listener,
"nvmf_subsystem_add_ns":
self.nvmf_subsystem_add_ns,
"nvmf_get_subsystems": self.get_nvmf_subsystems}
self.bdevs = copy.deepcopy(BDEVS)
self.nvmf_subsystems = copy.deepcopy(NVMF_SUBSYSTEMS)
def __del__(self):
pass
def get_bdevs(self, params=None):
if params and 'name' in params:
for bdev in self.bdevs:
for alias in bdev['aliases']:
if params['name'] in alias:
return json.dumps({"result": [bdev]})
if bdev['name'] == params['name']:
return json.dumps({"result": [bdev]})
return json.dumps({"error": "Not found"})
return json.dumps({"result": self.bdevs})
def get_nvmf_subsystems(self, params=None):
return json.dumps({"result": self.nvmf_subsystems})
def construct_nvmf_subsystem(self, params=None):
nvmf_subsystem = {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": params['serial_number'],
"nqn": params['nqn']
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def delete_nvmf_subsystem(self, params=None):
found_id = -1
i = 0
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
found_id = i
i += 1
if found_id != -1:
del self.nvmf_subsystems[found_id]
return json.dumps({"result": {}})
def nvmf_subsystem_create(self, params=None):
nvmf_subsystem = {
"namespaces": [],
"nqn": params['nqn'],
"serial_number": "S0000000000000000001",
"allow_any_host": False,
"subtype": "NVMe",
"hosts": [],
"listen_addresses": []
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def nvmf_subsystem_add_listener(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['listen_addresses'].append(
params['listen_address']
)
return json.dumps({"result": ""})
def nvmf_subsystem_add_ns(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['namespaces'].append(
params['namespace']
)
return json.dumps({"result": ""})
def call(self, method, params=None):
req = {}
req['jsonrpc'] = '2.0'
req['method'] = method
req['id'] = 1
if (params):
req['params'] = params
response = json.loads(self.methods[method](params))
if not response:
return {}
if 'error' in response:
msg = "\n".join(["Got JSON-RPC error response",
"request:",
json.dumps(req, indent=2),
"response:",
json.dumps(response['error'], indent=2)])
raise JSONRPCException(msg)
return response['result']
class Target(object):
def __init__(self, name="Nvme0n1p0"):
self.name = name
class SpdkNvmfDriverTestCase(test.TestCase):
def setUp(self):
super(SpdkNvmfDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.target_ip_address = '192.168.0.1'
self.configuration.target_port = '4420'
self.configuration.target_prefix = ""
self.configuration.nvmet_port_id = "1"
self.configuration.nvmet_ns_id = "fake_id"
self.configuration.nvmet_subsystem_name = "nqn.2014-08.io.spdk"
self.configuration.target_protocol = "nvmet_rdma"
self.configuration.spdk_rpc_ip = "127.0.0.1"
self.configuration.spdk_rpc_port = 8000
self.driver = spdknvmf_driver.SpdkNvmf(configuration=
self.configuration)
self.jsonrpcclient = JSONRPCClient()
def test__get_spdk_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
bdevs = self.driver._rpc_call("bdev_get_bdevs")
bdev_name = bdevs[0]['name']
volume_name = self.driver._get_spdk_volume_name(bdev_name)
self.assertEqual(bdev_name, volume_name)
volume_name = self.driver._get_spdk_volume_name("fake")
self.assertIsNone(volume_name)
def test__get_nqn_with_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
nqn = self.driver._get_nqn_with_volume_name("Nvme0n1p0")
nqn_tmp = self.driver._rpc_call("nvmf_get_subsystems")[1]['nqn']
self.assertEqual(nqn, nqn_tmp)
nqn = self.driver._get_nqn_with_volume_name("fake")
self.assertIsNone(nqn)
def test__get_first_free_node(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
free_node = self.driver._get_first_free_node()
self.assertEqual(3, free_node)
def test_create_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("nvmf_get_subsystems")
self.driver.create_nvmeof_target("Nvme0n1p1",
"nqn.2016-06.io.spdk",
"192.168.0.1",
4420, "rdma", -1, -1, "")
subsystems_last = self.driver._rpc_call("nvmf_get_subsystems")
self.assertEqual(len(subsystems_first) + 1, len(subsystems_last))
def test_delete_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("nvmf_get_subsystems")
target = Target()
self.driver.delete_nvmeof_target(target)
subsystems_last = self.driver._rpc_call("nvmf_get_subsystems")
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
target.name = "fake"
self.driver.delete_nvmeof_target(target)
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
 | [((233, 21, 233, 41), 'copy.deepcopy', 'copy.deepcopy', ({(233, 35, 233, 40): 'BDEVS'}, {}), '(BDEVS)', False, 'import copy\n'), ((234, 31, 234, 61), 'copy.deepcopy', 'copy.deepcopy', ({(234, 45, 234, 60): 'NVMF_SUBSYSTEMS'}, {}), '(NVMF_SUBSYSTEMS)', False, 'import copy\n'), ((249, 15, 249, 49), 'json.dumps', 'json.dumps', ({(249, 26, 249, 48): "{'result': self.bdevs}"}, {}), "({'result': self.bdevs})", False, 'import json\n'), ((252, 15, 252, 59), 'json.dumps', 'json.dumps', ({(252, 26, 252, 58): "{'result': self.nvmf_subsystems}"}, {}), "({'result': self.nvmf_subsystems})", False, 'import json\n'), ((270, 15, 270, 53), 'json.dumps', 'json.dumps', ({(270, 26, 270, 52): "{'result': nvmf_subsystem}"}, {}), "({'result': nvmf_subsystem})", False, 'import json\n'), ((283, 15, 283, 41), 'json.dumps', 'json.dumps', ({(283, 26, 283, 40): "{'result': {}}"}, {}), "({'result': {}})", False, 'import json\n'), ((298, 15, 298, 53), 'json.dumps', 'json.dumps', ({(298, 26, 298, 52): "{'result': nvmf_subsystem}"}, {}), "({'result': nvmf_subsystem})", False, 'import json\n'), ((307, 15, 307, 41), 'json.dumps', 'json.dumps', ({(307, 26, 307, 40): "{'result': ''}"}, {}), "({'result': ''})", False, 'import json\n'), ((316, 15, 316, 41), 'json.dumps', 'json.dumps', ({(316, 26, 316, 40): "{'result': ''}"}, {}), "({'result': ''})", False, 'import json\n'), ((348, 29, 348, 58), 'unittest.mock.Mock', 'mock.Mock', ({(348, 39, 348, 57): 'conf.Configuration'}, {}), '(conf.Configuration)', False, 'from unittest import mock\n'), ((358, 22, 359, 66), 'cinder.volume.targets.spdknvmf.SpdkNvmf', 'spdknvmf_driver.SpdkNvmf', (), '', True, 'from cinder.volume.targets import spdknvmf as spdknvmf_driver\n'), ((363, 13, 364, 55), 'unittest.mock.patch.object', 'mock.patch.object', ({(363, 31, 363, 42): 'self.driver', (363, 44, 363, 55): '"""_rpc_call"""', (364, 31, 364, 54): 'self.jsonrpcclient.call'}, {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)", False, 'from unittest import mock\n'), ((373, 13, 374, 55), 'unittest.mock.patch.object', 'mock.patch.object', ({(373, 31, 373, 42): 'self.driver', (373, 44, 373, 55): '"""_rpc_call"""', (374, 31, 374, 54): 'self.jsonrpcclient.call'}, {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)", False, 'from unittest import mock\n'), ((382, 13, 383, 55), 'unittest.mock.patch.object', 'mock.patch.object', ({(382, 31, 382, 42): 'self.driver', (382, 44, 382, 55): '"""_rpc_call"""', (383, 31, 383, 54): 'self.jsonrpcclient.call'}, {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)", False, 'from unittest import mock\n'), ((388, 13, 389, 55), 'unittest.mock.patch.object', 'mock.patch.object', ({(388, 31, 388, 42): 'self.driver', (388, 44, 388, 55): '"""_rpc_call"""', (389, 31, 389, 54): 'self.jsonrpcclient.call'}, {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)", False, 'from unittest import mock\n'), ((399, 13, 400, 55), 'unittest.mock.patch.object', 'mock.patch.object', ({(399, 31, 399, 42): 'self.driver', (399, 44, 399, 55): '"""_rpc_call"""', (400, 31, 400, 54): 'self.jsonrpcclient.call'}, {}), "(self.driver, '_rpc_call', self.jsonrpcclient.call)", False, 'from unittest import mock\n'), ((246, 27, 246, 57), 'json.dumps', 'json.dumps', ({(246, 38, 246, 56): "{'result': [bdev]}"}, {}), "({'result': [bdev]})", False, 'import json\n'), ((332, 29, 332, 54), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((334, 29, 334, 68), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((244, 31, 244, 61), 'json.dumps', 'json.dumps', ({(244, 42, 244, 60): "{'result': [bdev]}"}, {}), "({'result': [bdev]})", False, 'import json\n')] |
yizhang7210/Acre | server/algos/euler/transformer.py | c98cf8a4fdfb223a1958e8e61df759f889a1b13f | """ This is algos.euler.transformer module.
This module is responsible for transforming raw candle data into training
samples usable to the Euler algorithm.
"""
import datetime
import decimal
from algos.euler.models import training_samples as ts
from core.models import instruments
from datasource.models import candles
TWO_PLACES = decimal.Decimal('0.01')
def extract_features(day_candle):
""" Extract the features for the learning algorithm from a daily candle.
The Features are:
high_bid, low_bid, close_bid, open_ask, high_ask, low_ask,
and close_ask (all relative to open_bid) in pips.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
features: List of Decimals. The features described above, all in two
decimal places.
"""
multiplier = day_candle.instrument.multiplier
features = [
day_candle.high_bid,
day_candle.low_bid,
day_candle.close_bid,
day_candle.open_ask,
day_candle.high_ask,
day_candle.low_ask,
day_candle.close_ask,
]
features = [multiplier * (x - day_candle.open_bid) for x in features]
features = [decimal.Decimal(x).quantize(TWO_PLACES) for x in features]
return features
def get_profitable_change(day_candle):
""" Get the potential daily profitable price change in pips.
If prices rise enough, we have: close_bid - open_ask (> 0), buy.
If prices fall enough, we have: close_ask - open_bid (< 0), sell.
if prices stay relatively still, we don't buy or sell. It's 0.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
profitable_change: Decimal. The profitable rate change described
above, in two decimal places.
"""
multiplier = day_candle.instrument.multiplier
change = 0
if day_candle.close_bid > day_candle.open_ask:
change = multiplier * (day_candle.close_bid - day_candle.open_ask)
elif day_candle.close_ask < day_candle.open_bid:
change = multiplier * (day_candle.close_ask - day_candle.open_bid)
return decimal.Decimal(change).quantize(TWO_PLACES)
def build_sample_row(candle_previous, candle_next):
""" Build one training sample from two consecutive days of candles.
Args:
candle_previous: candles.Candle object. Candle of first day.
candle_next: candles.Candle object. Candle of second day.
Returns:
sample: TrainingSample object. One training sample for learning.
"""
return ts.create_one(
instrument=candle_next.instrument,
date=candle_next.start_time.date() + datetime.timedelta(1),
features=extract_features(candle_previous),
target=get_profitable_change(candle_next))
def get_start_time(instrument):
""" Get the start time for retrieving candles of the given instrument.
This is determined by the last training sample in the database.
Args:
instrument: Instrument object. The given instrument.
Returns:
start_time: Datetime object. The datetime from which to query
candles from to fill the rest of the training samples.
"""
last_sample = ts.get_last(instrument)
if last_sample is not None:
start_date = last_sample.date - datetime.timedelta(1)
return datetime.datetime.combine(start_date, datetime.time())
return datetime.datetime(2005, 1, 1)
def run():
""" Update the training samples in the database from the latest candles.
This should be run daily to ensure the training set is up-to-date.
Args:
None.
"""
all_new_samples = []
for instrument in instruments.get_all():
start_time = get_start_time(instrument)
new_candles = candles.get_candles(
instrument=instrument, start=start_time, order_by='start_time')
for i in range(len(new_candles) - 1):
all_new_samples.append(
build_sample_row(new_candles[i], new_candles[i + 1]))
ts.insert_many(all_new_samples)
| [((12, 13, 12, 36), 'decimal.Decimal', 'decimal.Decimal', ({(12, 29, 12, 35): '"""0.01"""'}, {}), "('0.01')", False, 'import decimal\n'), ((95, 18, 95, 41), 'algos.euler.models.training_samples.get_last', 'ts.get_last', ({(95, 30, 95, 40): 'instrument'}, {}), '(instrument)', True, 'from algos.euler.models import training_samples as ts\n'), ((100, 11, 100, 40), 'datetime.datetime', 'datetime.datetime', ({(100, 29, 100, 33): '(2005)', (100, 35, 100, 36): '(1)', (100, 38, 100, 39): '(1)'}, {}), '(2005, 1, 1)', False, 'import datetime\n'), ((111, 22, 111, 43), 'core.models.instruments.get_all', 'instruments.get_all', ({}, {}), '()', False, 'from core.models import instruments\n'), ((119, 4, 119, 35), 'algos.euler.models.training_samples.insert_many', 'ts.insert_many', ({(119, 19, 119, 34): 'all_new_samples'}, {}), '(all_new_samples)', True, 'from algos.euler.models import training_samples as ts\n'), ((113, 22, 114, 75), 'datasource.models.candles.get_candles', 'candles.get_candles', (), '', False, 'from datasource.models import candles\n'), ((64, 11, 64, 34), 'decimal.Decimal', 'decimal.Decimal', ({(64, 27, 64, 33): 'change'}, {}), '(change)', False, 'import decimal\n'), ((97, 40, 97, 61), 'datetime.timedelta', 'datetime.timedelta', ({(97, 59, 97, 60): '(1)'}, {}), '(1)', False, 'import datetime\n'), ((98, 53, 98, 68), 'datetime.time', 'datetime.time', ({}, {}), '()', False, 'import datetime\n'), ((39, 16, 39, 34), 'decimal.Decimal', 'decimal.Decimal', ({(39, 32, 39, 33): 'x'}, {}), '(x)', False, 'import decimal\n'), ((79, 45, 79, 66), 'datetime.timedelta', 'datetime.timedelta', ({(79, 64, 79, 65): '(1)'}, {}), '(1)', False, 'import datetime\n')] |
analyticsftw/diagrams | diagrams/outscale/__init__.py | 217af329a323084bb98031ac1768bc2353e6d9b6 | from diagrams import Node
class _Outscale(Node):
_provider = "outscale"
_icon_dir = "resources/outscale"
fontcolor = "#ffffff"
| [] |
pymango/pymango | misc/python/mango/application/main_driver/logstream.py | b55f831f0194b214e746b2dfb4d9c6671a1abc38 | __doc__ = \
"""
=======================================================================================
Main-driver :obj:`LogStream` variables (:mod:`mango.application.main_driver.logstream`)
=======================================================================================
.. currentmodule:: mango.application.main_driver.logstream
Logging objects/attributes for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Classes
=======
.. autosummary::
:toctree: generated/
LogStream - Message logging for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Attributes
==========
.. autodata:: log
.. autodata:: mstLog
.. autodata:: mstOut
.. autodata:: warnLog
.. autodata:: errLog
"""
import mango
import mango.mpi as mpi
import os
import os.path
import sys
if sys.platform.startswith('linux'):
import DLFCN as dl
_flags = sys.getdlopenflags()
sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
from . import _mango_main_driver as _mango_main_driver_so
sys.setdlopenflags(_flags)
else:
from . import _mango_main_driver as _mango_main_driver_so
from mango.core import LogStream
#: Messages sent to stdout, prefixed with :samp:`'P<RANK>'`, where :samp:`<RANK>` is MPI process world rank.
log = _mango_main_driver_so._log
#: Messages sent to stdout, prefixed with :samp:`'MST'`, and messages also saved to history-meta-data.
mstLog = _mango_main_driver_so._mstLog
#: Messages sent to stdout, prefixed with :samp:`'OUT'`.
mstOut = _mango_main_driver_so._mstOut
#: Messages sent to stderr, prefixed with :samp:`'WARNING'`.
warnLog = _mango_main_driver_so._warnLog
#: Messages sent to stderr, prefixed with :samp:`'ERROR'`.
errLog = _mango_main_driver_so._errLog
__all__ = [s for s in dir() if not s.startswith('_')]
| [((42, 3, 42, 35), 'sys.platform.startswith', 'sys.platform.startswith', ({(42, 27, 42, 34): '"""linux"""'}, {}), "('linux')", False, 'import sys\n'), ((44, 13, 44, 33), 'sys.getdlopenflags', 'sys.getdlopenflags', ({}, {}), '()', False, 'import sys\n'), ((45, 4, 45, 50), 'sys.setdlopenflags', 'sys.setdlopenflags', ({(45, 23, 45, 49): '(dl.RTLD_NOW | dl.RTLD_GLOBAL)'}, {}), '(dl.RTLD_NOW | dl.RTLD_GLOBAL)', False, 'import sys\n'), ((47, 4, 47, 30), 'sys.setdlopenflags', 'sys.setdlopenflags', ({(47, 23, 47, 29): '_flags'}, {}), '(_flags)', False, 'import sys\n')] |
luftek/python-ucdev | ucdev/cy7c65211/header.py | 8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9 | # -*- coding: utf-8-unix -*-
import platform
######################################################################
# Platform specific headers
######################################################################
if platform.system() == 'Linux':
src = """
typedef bool BOOL;
"""
######################################################################
# Common headers
######################################################################
src += """
#define CY_STRING_DESCRIPTOR_SIZE 256
#define CY_MAX_DEVICE_INTERFACE 5
#define CY_US_VERSION_MAJOR 1
#define CY_US_VERSION_MINOR 0
#define CY_US_VERSION_PATCH 0
#define CY_US_VERSION 1
#define CY_US_VERSION_BUILD 74
typedef unsigned int UINT32;
typedef unsigned char UINT8;
typedef unsigned short UINT16;
typedef char CHAR;
typedef unsigned char UCHAR;
typedef void* CY_HANDLE;
typedef void (*CY_EVENT_NOTIFICATION_CB_FN)(UINT16 eventsNotified);
typedef struct _CY_VID_PID {
UINT16 vid;
UINT16 pid;
} CY_VID_PID, *PCY_VID_PID;
typedef struct _CY_LIBRARY_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patch;
UINT8 buildNumber;
} CY_LIBRARY_VERSION, *PCY_LIBRARY_VERSION;
typedef struct _CY_FIRMWARE_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patchNumber;
UINT32 buildNumber;
} CY_FIRMWARE_VERSION, *PCY_FIRMWARE_VERSION;
typedef enum _CY_DEVICE_CLASS{
CY_CLASS_DISABLED = 0,
CY_CLASS_CDC = 0x02,
CY_CLASS_PHDC = 0x0F,
CY_CLASS_VENDOR = 0xFF
} CY_DEVICE_CLASS;
typedef enum _CY_DEVICE_TYPE {
CY_TYPE_DISABLED = 0,
CY_TYPE_UART,
CY_TYPE_SPI,
CY_TYPE_I2C,
CY_TYPE_JTAG,
CY_TYPE_MFG
} CY_DEVICE_TYPE;
typedef enum _CY_DEVICE_SERIAL_BLOCK
{
SerialBlock_SCB0 = 0,
SerialBlock_SCB1,
SerialBlock_MFG
} CY_DEVICE_SERIAL_BLOCK;
typedef struct _CY_DEVICE_INFO {
CY_VID_PID vidPid;
UCHAR numInterfaces;
UCHAR manufacturerName [256];
UCHAR productName [256];
UCHAR serialNum [256];
UCHAR deviceFriendlyName [256];
CY_DEVICE_TYPE deviceType [5];
CY_DEVICE_CLASS deviceClass [5];
CY_DEVICE_SERIAL_BLOCK deviceBlock;
} CY_DEVICE_INFO,*PCY_DEVICE_INFO;
typedef struct _CY_DATA_BUFFER {
UCHAR *buffer;
UINT32 length;
UINT32 transferCount;
} CY_DATA_BUFFER,*PCY_DATA_BUFFER;
typedef enum _CY_RETURN_STATUS{
CY_SUCCESS = 0,
CY_ERROR_ACCESS_DENIED,
CY_ERROR_DRIVER_INIT_FAILED,
CY_ERROR_DEVICE_INFO_FETCH_FAILED,
CY_ERROR_DRIVER_OPEN_FAILED,
CY_ERROR_INVALID_PARAMETER,
CY_ERROR_REQUEST_FAILED,
CY_ERROR_DOWNLOAD_FAILED,
CY_ERROR_FIRMWARE_INVALID_SIGNATURE,
CY_ERROR_INVALID_FIRMWARE,
CY_ERROR_DEVICE_NOT_FOUND,
CY_ERROR_IO_TIMEOUT,
CY_ERROR_PIPE_HALTED,
CY_ERROR_BUFFER_OVERFLOW,
CY_ERROR_INVALID_HANDLE,
CY_ERROR_ALLOCATION_FAILED,
CY_ERROR_I2C_DEVICE_BUSY,
CY_ERROR_I2C_NAK_ERROR,
CY_ERROR_I2C_ARBITRATION_ERROR,
CY_ERROR_I2C_BUS_ERROR,
CY_ERROR_I2C_BUS_BUSY,
CY_ERROR_I2C_STOP_BIT_SET,
CY_ERROR_STATUS_MONITOR_EXIST
} CY_RETURN_STATUS;
typedef struct _CY_I2C_CONFIG{
UINT32 frequency;
UINT8 slaveAddress;
BOOL isMaster;
BOOL isClockStretch;
} CY_I2C_CONFIG,*PCY_I2C_CONFIG;
typedef struct _CY_I2C_DATA_CONFIG
{
UCHAR slaveAddress;
BOOL isStopBit;
BOOL isNakBit;
} CY_I2C_DATA_CONFIG, *PCY_I2C_DATA_CONFIG;
typedef enum _CY_SPI_PROTOCOL {
CY_SPI_MOTOROLA = 0,
CY_SPI_TI,
CY_SPI_NS
} CY_SPI_PROTOCOL;
typedef struct _CY_SPI_CONFIG
{
UINT32 frequency;
UCHAR dataWidth;
CY_SPI_PROTOCOL protocol ;
BOOL isMsbFirst;
BOOL isMaster;
BOOL isContinuousMode;
BOOL isSelectPrecede;
BOOL isCpha;
BOOL isCpol;
}CY_SPI_CONFIG,*PCY_SPI_CONFIG;
typedef enum _CY_UART_BAUD_RATE
{
CY_UART_BAUD_300 = 300,
CY_UART_BAUD_600 = 600,
CY_UART_BAUD_1200 = 1200,
CY_UART_BAUD_2400 = 2400,
CY_UART_BAUD_4800 = 4800,
CY_UART_BAUD_9600 = 9600,
CY_UART_BAUD_14400 = 14400,
CY_UART_BAUD_19200 = 19200,
CY_UART_BAUD_38400 = 38400,
CY_UART_BAUD_56000 = 56000,
CY_UART_BAUD_57600 = 57600,
CY_UART_BAUD_115200 = 115200,
CY_UART_BAUD_230400 = 230400,
CY_UART_BAUD_460800 = 460800,
CY_UART_BAUD_921600 = 921600,
CY_UART_BAUD_1000000 = 1000000,
CY_UART_BAUD_3000000 = 3000000,
}CY_UART_BAUD_RATE;
typedef enum _CY_UART_PARITY_MODE {
CY_DATA_PARITY_DISABLE = 0,
CY_DATA_PARITY_ODD,
CY_DATA_PARITY_EVEN,
CY_DATA_PARITY_MARK,
CY_DATA_PARITY_SPACE
} CY_UART_PARITY_MODE;
typedef enum _CY_UART_STOP_BIT {
CY_UART_ONE_STOP_BIT = 1,
CY_UART_TWO_STOP_BIT
} CY_UART_STOP_BIT;
typedef enum _CY_FLOW_CONTROL_MODES {
CY_UART_FLOW_CONTROL_DISABLE = 0,
CY_UART_FLOW_CONTROL_DSR,
CY_UART_FLOW_CONTROL_RTS_CTS,
CY_UART_FLOW_CONTROL_ALL
} CY_FLOW_CONTROL_MODES;
typedef struct _CY_UART_CONFIG {
CY_UART_BAUD_RATE baudRate;
UINT8 dataWidth;
CY_UART_STOP_BIT stopBits;
CY_UART_PARITY_MODE parityMode;
BOOL isDropOnRxErrors;
} CY_UART_CONFIG,*PCY_UART_CONFIG;
typedef enum _CY_CALLBACK_EVENTS {
CY_UART_CTS_BIT = 0x01,
CY_UART_DSR_BIT = 0x02,
CY_UART_BREAK_BIT = 0x04,
CY_UART_RING_SIGNAL_BIT = 0x08,
CY_UART_FRAME_ERROR_BIT = 0x10,
CY_UART_PARITY_ERROR_BIT = 0x20,
CY_UART_DATA_OVERRUN_BIT = 0x40,
CY_UART_DCD_BIT = 0x100,
CY_SPI_TX_UNDERFLOW_BIT = 0x200,
CY_SPI_BUS_ERROR_BIT = 0x400,
CY_ERROR_EVENT_FAILED_BIT = 0x800
} CY_CALLBACK_EVENTS;
CY_RETURN_STATUS CyLibraryInit ();
CY_RETURN_STATUS CyLibraryExit ();
CY_RETURN_STATUS CyGetListofDevices (
UINT8* numDevices
);
CY_RETURN_STATUS CyGetDeviceInfo(
UINT8 deviceNumber,
CY_DEVICE_INFO *deviceInfo
);
CY_RETURN_STATUS CyGetDeviceInfoVidPid (
CY_VID_PID vidPid,
UINT8 *deviceIdList,
CY_DEVICE_INFO *deviceInfoList,
UINT8 *deviceCount,
UINT8 infoListLength
);
CY_RETURN_STATUS CyOpen (
UINT8 deviceNumber,
UINT8 interfaceNum,
CY_HANDLE *handle
);
CY_RETURN_STATUS CyClose (
CY_HANDLE handle
);
CY_RETURN_STATUS CyCyclePort (
CY_HANDLE handle
);
CY_RETURN_STATUS CySetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 value
);
CY_RETURN_STATUS CyGetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 *value
);
CY_RETURN_STATUS CySetEventNotification(
CY_HANDLE handle,
CY_EVENT_NOTIFICATION_CB_FN notificationCbFn
);
CY_RETURN_STATUS CyAbortEventNotification(
CY_HANDLE handle
);
CY_RETURN_STATUS CyGetLibraryVersion (
CY_HANDLE handle,
PCY_LIBRARY_VERSION version
);
CY_RETURN_STATUS CyGetFirmwareVersion (
CY_HANDLE handle,
PCY_FIRMWARE_VERSION firmwareVersion
);
CY_RETURN_STATUS CyResetDevice (
CY_HANDLE handle
);
CY_RETURN_STATUS CyProgUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *progBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyReadUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyGetSignature (
CY_HANDLE handle,
UCHAR *pSignature
);
CY_RETURN_STATUS CyGetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CySetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CyUartRead (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartSetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES mode
);
CY_RETURN_STATUS CyUartGetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES *mode
);
CY_RETURN_STATUS CyUartSetRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetBreak(
CY_HANDLE handle,
UINT16 timeout
);
CY_RETURN_STATUS CyGetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CySetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CyI2cRead (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cWrite (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cReset(
CY_HANDLE handle,
BOOL resetMode
);
CY_RETURN_STATUS CyGetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySpiReadWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagEnable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagDisable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagWrite (
CY_HANDLE handle,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagRead (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyPhdcClrFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcSetFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcGetStatus (
CY_HANDLE handle,
UINT16 *dataStatus
);
"""
| [((9, 3, 9, 20), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import platform\n')] |
richarajpal/deep_qa | deep_qa/layers/wrappers/output_mask.py | d918335a1bed71b9cfccf1d5743321cee9c61952 | from overrides import overrides
from ..masked_layer import MaskedLayer
class OutputMask(MaskedLayer):
"""
This Layer is purely for debugging. You can wrap this on a layer's output to get the mask
output by that layer as a model output, for easier visualization of what the model is actually
doing.
Don't try to use this in an actual model.
"""
@overrides
def compute_mask(self, inputs, mask=None):
return None
@overrides
def call(self, inputs, mask=None): # pylint: disable=unused-argument
return mask
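# Hypothetical usage sketch (names are placeholders): wrap a masked tensor to expose
# its mask as a model output, e.g. mask_output = OutputMask()(encoded_sentence)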
| [] |
karnesh/Monte-Carlo-LJ | ljmc/energy.py | f33f08c247df963ca48b9d9f8456e26c0bb19923 | """
energy.py
function that computes the inter particle energy
It uses truncated 12-6 Lennard Jones potential
All the variables are in reduced units.
"""
def distance(atom1, atom2):
"""
Computes the square of inter particle distance
Minimum image convention is applied for distance calculation for periodic boundary conditions
"""
dx = atom1.x - atom2.x
dy = atom1.y - atom2.y
dz = atom1.z - atom2.z
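    # The box lengths (Lx, Ly, Lz) and half-lengths (halfLx, halfLy, halfLz) are
    # assumed to be defined at module level as the periodic simulation-box parameters.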
    if dx > halfLx:
dx -= Lx
elif dx < -halfLx:
dx += Lx
if dy > halfLy:
dy -= Ly
elif dy < -halfLy:
dy += Ly
if dz > halfLz:
dz -= Lz
elif dz < -halfLz:
dz += Lz
return dx**2 + dy**2 + dz**2
def energy(atom1, atom2, rc):
    '''
    Calculates the truncated Lennard-Jones pair energy of two atoms.
    '''
    ## Arithmetic mixing rules - Lorentz-Berthelot mixing
    eps = (atom1.eps + atom2.eps)/2
    sig = (atom1.sigma * atom2.sigma)**0.5
    rcsq = rc**2
    rsq = distance(atom1, atom2)
    if rsq <= rcsq:
        # rsq is the squared separation r**2, so (sig**2/rsq)**6 == (sig/r)**12
        energy = 4.0*eps*((sig**2/rsq)**6.0 - (sig**2/rsq)**3.0)
    else:
        energy = 0.0
    return energy
def writeEnergy(step, energy):
'''
Writes the energy to a file.
'''
with open('energy.dat', 'a') as f:
f.write('{0} {1}\n'.format(step, energy))
| [] |
ludgerradke/bMRI | CEST/Evaluation/lorenzian.py | dcf93749bb2fba3700e6bcfde691355d55090951 | import numpy as np
import math
from scipy.optimize import curve_fit
def calc_lorentzian(CestCurveS, x_calcentires, mask, config):
(rows, colums, z_slices, entires) = CestCurveS.shape
lorenzian = {key: np.zeros((rows, colums, z_slices), dtype=float) for key in config.lorenzian_keys}
for k in range(z_slices):
for i in range(rows):
for j in range(colums):
if mask[i, j, k] != 0:
params = calc_lorenzian_pixel(CestCurveS[i, j, k, :], x_calcentires, config.Lorenzian['MT_f'],
config.Lorenzian['NOE1_f'], config.Lorenzian['NOE2_f'],
config.Lorenzian['OH_f'], config.Lorenzian['NH_f'])
if params is None:
continue
dic = {
'OH_a': params[3],
'OH_w': params[4],
'NH_a': params[5],
'NH_w': params[6],
'NOE1_a': params[7],
'NOE1_w': params[8],
'NOE2_a': params[9],
'NOE2_w': params[10],
'MT_a': params[11],
'MT_w': params[12],
}
for key in config.lorenzian_keys:
lorenzian[key][i, j, k] = dic[key]
return lorenzian
def calc_lorenzian_pixel(values, x_calcentires, MT_f, NOE1_f, NOE2_f, OH_f, NH_f):
# wassr_offset, da die Z-Spektren vorher korrigiert wurden
fit = lorenz_like_matlab(wassr_offset=0, MT_f=MT_f, NOE1_f=NOE1_f, NOE2_f=NOE2_f, OH_f=OH_f, NH_f=NH_f)
try:
param, param_cov = curve_fit(fit, x_calcentires, values, bounds=([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10,
10]))
except RuntimeError:
param = None
return param
def lorenz_like_matlab(wassr_offset, MT_f: float = - 2.43, NOE1_f: float = - 1, NOE2_f: float = - 2.6,
OH_f: float = + 1.4, NH_f: float = + 3.2):
    # X_f = frequency offset of pool X relative to water.
    # Original hint: ret = (a + ak) - (a * ((b ** 2) / 4) / (((b ** 2) / 4) + (x - wassr_offset) ** 2))
    # NOTE: the original body was left as `pass`. The closure below is a hedged
    # reconstruction, not the author's implementation: the 13-parameter layout is
    # inferred from calc_lorenzian_pixel (indices 3-12 hold the OH/NH/NOE1/NOE2/MT
    # amplitude/width pairs), so indices 0-2 are assumed to be the water amplitude,
    # water width and a constant baseline.
    def fit(x, water_a, water_w, baseline, OH_a, OH_w, NH_a, NH_w,
            NOE1_a, NOE1_w, NOE2_a, NOE2_w, MT_a, MT_w):
        return (baseline
                - one_lorenz(x, water_a, water_w, wassr_offset, 0)
                - one_lorenz(x, OH_a, OH_w, wassr_offset, OH_f)
                - one_lorenz(x, NH_a, NH_w, wassr_offset, NH_f)
                - one_lorenz(x, NOE1_a, NOE1_w, wassr_offset, NOE1_f)
                - one_lorenz(x, NOE2_a, NOE2_w, wassr_offset, NOE2_f)
                - one_lorenz(x, MT_a, MT_w, wassr_offset, MT_f))
    return fit
def one_lorenz(x, amplitude, width, wassr_offset, frequenz):
return amplitude * ((width ** 2) / 4) / (((width ** 2) / 4) + (x - (wassr_offset + frequenz)) ** 2)
| [((9, 22, 9, 69), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((40, 27, 43, 79), 'scipy.optimize.curve_fit', 'curve_fit', (), '', False, 'from scipy.optimize import curve_fit\n')] |
neuralchen/CooGAN | components/network_models_LSTU.py | 3155cbb5a283226474356d3a9f01918609ddd4ec | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################
# File: network_models_LSTU.py
# Created Date: Tuesday February 25th 2020
# Author: Chen Xuanhong
# Email: [email protected]
# Last Modified: Tuesday, 25th February 2020 9:57:06 pm
# Modified By: Chen Xuanhong
# Copyright (c) 2020 Shanghai Jiao Tong University
#############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tflib as tl
conv = partial(slim.conv2d, activation_fn=None)
dconv = partial(slim.conv2d_transpose, activation_fn=None)
fc = partial(tl.flatten_fully_connected, activation_fn=None)
relu = tf.nn.relu
lrelu = tf.nn.leaky_relu
sigmoid = tf.nn.sigmoid
tanh = tf.nn.tanh
batch_norm = partial(slim.batch_norm, scale=True, updates_collections=None)
instance_norm = slim.instance_norm
MAX_DIM = 64 * 16
def Genc(x, dim=64, n_layers=5, multi_inputs=1, is_training=True):
bn = partial(batch_norm, is_training=is_training)
conv_bn_lrelu = partial(conv, normalizer_fn=bn, activation_fn=lrelu)
with tf.variable_scope('Genc', reuse=tf.AUTO_REUSE):
h, w = x.shape[1:3]
z = x
zs = []
for i in range(n_layers):
d = min(dim * 2**i, MAX_DIM)
if multi_inputs > i and i > 0:
z = tf.concat([z, tf.image.resize_bicubic(x, (h//(2**i), w//(2**i)))], 3)
z = conv_bn_lrelu(z, d, 4, 2)
zs.append(z)
return zs
def LSTU(in_data, state, out_channel, is_training=True, kernel_size=3, norm='none', pass_state='lstate'):
if norm == 'bn':
norm_fn = partial(batch_norm, is_training=is_training)
elif norm == 'in':
norm_fn = instance_norm
else:
norm_fn = None
gate = partial(conv, normalizer_fn=norm_fn, activation_fn=sigmoid)
info = partial(conv, normalizer_fn=norm_fn, activation_fn=tanh)
with tf.name_scope('ConvGRUCell'):
state_ = dconv(state, out_channel, 4, 2) # upsample and make `channel` identical to `out_channel`
reset_gate = gate(tf.concat([in_data, state_], axis=3), 1, kernel_size)
update_gate = gate(tf.concat([in_data, state_], axis=3), 1, kernel_size)
new_state = reset_gate * state_
new_info = info(tf.concat([in_data, new_state], axis=3), out_channel, kernel_size)
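        # GRU-style gate: convex combination of the upsampled previous state and the new candidate.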
output = (1-update_gate)*state_ + update_gate*new_info
if pass_state == 'gru':
return output, output
elif pass_state == 'direct':
return output, state_
else: # 'stu'
return output, new_state
# state_hat = dconv(old_state, outdim, 4, 2)
# tmp_concat= _concat(x, state_hat, None)
# channelpool1=tf.concat([tf.reduce_max(tmp_concat,3,True), tf.reduce_mean(tmp_concat,3,True)], axis=3)
# r_channel=conv(channelpool1,1,7,1,normalizer_fn=None,activation_fn=sigmoid)
# new_state = r_channel * state_hat
# tmp_concat= _concat(x, new_state, None)
# hidden_info = conv(tmp_concat,outdim,3,1,normalizer_fn=None,activation_fn=tanh)
# tmp_concat= _concat(x, state_hat, None)
# channelpool2=tf.concat([tf.reduce_max(tmp_concat,3,True), tf.reduce_mean(tmp_concat,3,True)], axis=3)
# z=conv(channelpool2,1,7,1,normalizer_fn=None,activation_fn=sigmoid)
# output =z *hidden_info +(1-z)*state_hat
# return output,new_state
def Gstu(zs, _a, dim=64, n_layers=1, inject_layers=0, is_training=True, kernel_size=3, norm='none', pass_state='stu'):
def _concat(z, z_, _a):
feats = [z]
if z_ is not None:
feats.append(z_)
if _a is not None:
_a = tf.reshape(_a, [-1, 1, 1, tl.shape(_a)[-1]])
_a = tf.tile(_a, [1, tl.shape(z)[1], tl.shape(z)[2], 1])
feats.append(_a)
return tf.concat(feats, axis=3)
with tf.variable_scope('Gstu', reuse=tf.AUTO_REUSE):
zs_ = [zs[-1]]
state = _concat(zs[-1], None, _a)
for i in range(n_layers): # n_layers <= 4
d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
output = LSTU(zs[n_layers - 1 - i],state,d,is_training=is_training,
kernel_size=kernel_size, norm=norm, pass_state=pass_state)
zs_.insert(0, output[0])
if inject_layers > i:
state = _concat(output[1], None, _a)
else:
state = output[1]
return zs_
def Gdec(zs, _a, dim=64, n_layers=5, shortcut_layers=1, inject_layers=0, is_training=True, one_more_conv=0):
bn = partial(batch_norm, is_training=is_training)
dconv_bn_relu = partial(dconv, normalizer_fn=bn, activation_fn=relu)
shortcut_layers = min(shortcut_layers, n_layers - 1)
inject_layers = min(inject_layers, n_layers - 1)
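    # shortcut_layers: number of decoder levels that receive skip connections from
    # the encoder features zs; inject_layers: number of levels that re-receive the
    # attribute vector _a alongside the upsampled features.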
def _concat(z, z_, _a):
feats = [z]
if z_ is not None:
feats.append(z_)
if _a is not None:
_a = tf.reshape(_a, [-1, 1, 1, tl.shape(_a)[-1]])
_a = tf.tile(_a, [1, tl.shape(z)[1], tl.shape(z)[2], 1])
feats.append(_a)
return tf.concat(feats, axis=3)
with tf.variable_scope('Gdec', reuse=tf.AUTO_REUSE):
z = _concat(zs[-1], None, _a)
for i in range(n_layers):
if i < n_layers - 1:
d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
z = dconv_bn_relu(z, d, 4, 2)
if shortcut_layers > i:
z = _concat(z, zs[n_layers - 2 - i], None)
if inject_layers > i:
z = _concat(z, None, _a)
else:
if one_more_conv: # add one more conv after the decoder
z = dconv_bn_relu(z, dim//4, 4, 2)
x = tf.nn.tanh(dconv(z, 3, one_more_conv))
else:
x = z = tf.nn.tanh(dconv(z, 3, 4, 2))
return x
def D(x, n_att, dim=64, fc_dim=MAX_DIM, n_layers=5):
conv_in_lrelu = partial(conv, normalizer_fn=instance_norm, activation_fn=lrelu)
with tf.variable_scope('D', reuse=tf.AUTO_REUSE):
y = x
for i in range(n_layers):
d = min(dim * 2**i, MAX_DIM)
y = conv_in_lrelu(y, d, 4, 2)
logit_gan = lrelu(fc(y, fc_dim))
logit_gan = fc(logit_gan, 1)
logit_att = lrelu(fc(y, fc_dim))
logit_att = fc(logit_att, n_att)
return logit_gan, logit_att
def gradient_penalty(f, real, fake=None):
def _interpolate(a, b=None):
with tf.name_scope('interpolate'):
if b is None: # interpolation in DRAGAN
beta = tf.random_uniform(shape=tf.shape(a), minval=0., maxval=1.)
_, variance = tf.nn.moments(a, range(a.shape.ndims))
b = a + 0.5 * tf.sqrt(variance) * beta
shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.get_shape().as_list())
return inter
with tf.name_scope('gradient_penalty'):
x = _interpolate(real, fake)
pred = f(x)
if isinstance(pred, tuple):
pred = pred[0]
grad = tf.gradients(pred, x)[0]
norm = tf.norm(slim.flatten(grad), axis=1)
gp = tf.reduce_mean((norm - 1.)**2)
return gp
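# A minimal usage sketch (assumed here, not taken from this file): because
# gradient_penalty() keeps only the first element of tuple outputs, the
# discriminator D can be passed in directly, e.g.
#     d = partial(D, n_att=13)                    # 13 attributes is illustrative only
#     gp = gradient_penalty(d, xa_real, xb_fake)  # WGAN-GP style interpolation
#     gp = gradient_penalty(d, xa_real)           # DRAGAN style (fake=None branch)
# (d, xa_real and xb_fake are placeholder names.)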
| [((25, 7, 25, 47), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((26, 8, 26, 58), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((27, 5, 27, 60), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((32, 13, 32, 75), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((39, 9, 39, 53), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((40, 20, 40, 72), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((61, 11, 61, 70), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((62, 11, 62, 67), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((118, 9, 118, 53), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((119, 20, 119, 72), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((154, 20, 154, 83), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((42, 9, 42, 55), 'tensorflow.variable_scope', 'tf.variable_scope', (), '', True, 'import tensorflow as tf\n'), ((56, 18, 56, 62), 'functools.partial', 'partial', (), '', False, 'from functools import partial\n'), ((63, 9, 63, 37), 'tensorflow.name_scope', 'tf.name_scope', ({(63, 23, 63, 36): '"""ConvGRUCell"""'}, {}), "('ConvGRUCell')", True, 'import tensorflow as tf\n'), ((100, 15, 100, 39), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((102, 9, 102, 55), 'tensorflow.variable_scope', 'tf.variable_scope', (), '', True, 'import tensorflow as tf\n'), ((132, 15, 132, 39), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((134, 9, 134, 55), 'tensorflow.variable_scope', 'tf.variable_scope', (), '', True, 'import tensorflow as tf\n'), ((156, 9, 156, 52), 'tensorflow.variable_scope', 'tf.variable_scope', (), '', True, 'import tensorflow as tf\n'), ((184, 9, 184, 42), 'tensorflow.name_scope', 'tf.name_scope', ({(184, 23, 184, 41): '"""gradient_penalty"""'}, {}), "('gradient_penalty')", True, 'import tensorflow as tf\n'), ((191, 13, 191, 43), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(191, 28, 191, 42): '(norm - 1.0) ** 2'}, {}), '((norm - 1.0) ** 2)', True, 'import tensorflow as tf\n'), ((65, 27, 65, 63), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((66, 27, 66, 63), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((68, 27, 68, 66), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((173, 13, 173, 41), 'tensorflow.name_scope', 'tf.name_scope', ({(173, 27, 173, 40): '"""interpolate"""'}, {}), "('interpolate')", True, 'import tensorflow as tf\n'), ((179, 20, 179, 72), 'tensorflow.random_uniform', 'tf.random_uniform', (), '', True, 'import tensorflow as tf\n'), ((189, 15, 189, 36), 'tensorflow.gradients', 'tf.gradients', ({(189, 28, 189, 32): 'pred', (189, 34, 189, 35): 'x'}, {}), '(pred, x)', True, 'import tensorflow as tf\n'), ((190, 23, 190, 41), 'tensorflow.contrib.slim.flatten', 'slim.flatten', ({(190, 36, 190, 40): 'grad'}, {}), '(grad)', True, 'import tensorflow.contrib.slim as slim\n'), ((49, 34, 49, 84), 'tensorflow.image.resize_bicubic', 'tf.image.resize_bicubic', ({(49, 58, 49, 59): 'x', (49, 61, 49, 83): '(h // 2 ** i, w // 2 ** i)'}, {}), '(x, (h // 2 ** i, w // 2 ** i))', True, 'import tensorflow as tf\n'), ((97, 43, 97, 55), 
'tflib.shape', 'tl.shape', ({(97, 52, 97, 54): '_a'}, {}), '(_a)', True, 'import tflib as tl\n'), ((98, 33, 98, 44), 'tflib.shape', 'tl.shape', ({(98, 42, 98, 43): 'z'}, {}), '(z)', True, 'import tflib as tl\n'), ((98, 49, 98, 60), 'tflib.shape', 'tl.shape', ({(98, 58, 98, 59): 'z'}, {}), '(z)', True, 'import tflib as tl\n'), ((129, 43, 129, 55), 'tflib.shape', 'tl.shape', ({(129, 52, 129, 54): '_a'}, {}), '(_a)', True, 'import tflib as tl\n'), ((130, 33, 130, 44), 'tflib.shape', 'tl.shape', ({(130, 42, 130, 43): 'z'}, {}), '(z)', True, 'import tflib as tl\n'), ((130, 49, 130, 60), 'tflib.shape', 'tl.shape', ({(130, 58, 130, 59): 'z'}, {}), '(z)', True, 'import tflib as tl\n'), ((175, 47, 175, 58), 'tensorflow.shape', 'tf.shape', ({(175, 56, 175, 57): 'a'}, {}), '(a)', True, 'import tensorflow as tf\n'), ((178, 21, 178, 32), 'tensorflow.shape', 'tf.shape', ({(178, 30, 178, 31): 'a'}, {}), '(a)', True, 'import tensorflow as tf\n'), ((177, 30, 177, 47), 'tensorflow.sqrt', 'tf.sqrt', ({(177, 38, 177, 46): 'variance'}, {}), '(variance)', True, 'import tensorflow as tf\n')] |
torokmark/slender | slender/tests/list/test_keep_if.py | 3bf815e22f7802ba48706f31ba608cf609e23e68 |
from unittest import TestCase
from expects import expect, equal, raise_error
from slender import List
class TestKeepIf(TestCase):
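    # List.keep_if(func) keeps the elements for which func(item) is truthy;
    # a None predicate keeps every element and a non-callable one raises TypeError.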
def test_keep_if_if_func_is_none(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(None).to_list()).to(equal([1, 2, 3, 4, 5]))
def test_keep_if_if_func_is_valid(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(lambda item: item > 3).to_list()).to(equal([4, 5]))
def test_keep_if_if_func_is_invalid_for_all_items(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(lambda item: item > 6).to_list()).to(equal([]))
def test_keep_if_if_func_is_different(self):
e = List([1, 2, 3, 4])
expect(lambda: e.keep_if('...')).to(raise_error(TypeError))
| [((10, 12, 10, 33), 'slender.List', 'List', ({(10, 17, 10, 32): '[1, 2, 3, 4, 5]'}, {}), '([1, 2, 3, 4, 5])', False, 'from slender import List\n'), ((14, 12, 14, 33), 'slender.List', 'List', ({(14, 17, 14, 32): '[1, 2, 3, 4, 5]'}, {}), '([1, 2, 3, 4, 5])', False, 'from slender import List\n'), ((18, 12, 18, 33), 'slender.List', 'List', ({(18, 17, 18, 32): '[1, 2, 3, 4, 5]'}, {}), '([1, 2, 3, 4, 5])', False, 'from slender import List\n'), ((22, 12, 22, 30), 'slender.List', 'List', ({(22, 17, 22, 29): '[1, 2, 3, 4]'}, {}), '([1, 2, 3, 4])', False, 'from slender import List\n'), ((11, 45, 11, 67), 'expects.equal', 'equal', ({(11, 51, 11, 66): '[1, 2, 3, 4, 5]'}, {}), '([1, 2, 3, 4, 5])', False, 'from expects import expect, equal, raise_error\n'), ((15, 62, 15, 75), 'expects.equal', 'equal', ({(15, 68, 15, 74): '[4, 5]'}, {}), '([4, 5])', False, 'from expects import expect, equal, raise_error\n'), ((19, 62, 19, 71), 'expects.equal', 'equal', ({(19, 68, 19, 70): '[]'}, {}), '([])', False, 'from expects import expect, equal, raise_error\n'), ((23, 44, 23, 66), 'expects.raise_error', 'raise_error', ({(23, 56, 23, 65): 'TypeError'}, {}), '(TypeError)', False, 'from expects import expect, equal, raise_error\n')] |
1Crazymoney/bitcoin-cash-node | test/functional/bchn-txbroadcastinterval.py | 8f82823b3c5d4bcb401b0e4e6b464c1228f936e1 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Cash Node developers
# Author matricz
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that inv messages are sent according to
an exponential distribution with scale -txbroadcastinterval.
The outbound interval should be half of the inbound.
"""
import time
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, connect_nodes, disconnect_nodes
from scipy import stats
class InvReceiver(P2PInterface):
def __init__(self):
super().__init__()
self.invTimes = []
self.invDelays = []
def on_inv(self, message):
timeArrived = time.time()
        # If an inv contains more than one transaction, then the number of invs (==samplesize)
# will be non-deterministic. This would be an error.
assert(len(message.inv) == 1)
self.invTimes.append(timeArrived)
if len(self.invTimes) > 1:
timediff = self.invTimes[-1] - self.invTimes[-2]
self.invDelays.append(timediff)
class TxBroadcastIntervalTest(BitcoinTestFramework):
# This test will have a node create a number of transactions and relay them
# to the mininode InvReceivers (one inbound and one outbound)
# according to test parameters.
# A third disconnected node is used only to create signed transactions
# The nodes are configured with "-txbroadcastrate=1" and
# "-excessiveblocksize=2000000" so that they relay at most one tx per inv
# It's convenient, because we can now define the exact number of invs
# (== sample size -1) that we want to send
# This holds true only for interval values <= 500 ms
# The mininode InvReceiver just listens and registers the delays between invs
# and constructs a sample array from these delays
    # This sample is tested against a reference exponential distribution
    # density with the same parameters using scipy.stats.kstest
    # (See https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test)
    # The test is accepted if the delays sample resembles the reference
    # distribution -- more specifically, if the KS test does not reject the
    # hypothesis that the observed delays were drawn from the theoretical
    # exponential distribution at significance level alpha
    # (pvalue > alpha, default 0.001)
# There is one mininode that connects directly to the node that generates transactions.
# This tests the *inbound* connection interval.
# The first node creates an outbound connection to the second node,
# which relays the transactions instantly (-txbroadcastinterval=1)
# to the second mininode, which tests the *outbound* connection interval (= 1/2 of the inbound).
# (but is less reliable for small values of the -txbroadcastinterval)
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--interval", dest="interval", type=int, default=500,
help="Set the average send interval in ms")
parser.add_argument("--samplesize", dest="samplesize", type=int, default=100,
help="Set the samplesize (number of inv message delays) for testing")
parser.add_argument("--testoutbound", dest="testoutbound", action="store_true",
help="Set whether to test outbound (along inbound) connection interval")
parser.add_argument("--alpha", dest="alpha", type=float, default="0.001",
help="Set a confidence threshold for the kstest")
def set_test_params(self):
self.scale = self.options.interval / 1000
self.num_nodes = 3
args = [
["-txbroadcastinterval={}".format(self.options.interval),
"-txbroadcastrate=1", "-excessiveblocksize=2000000",
"-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)],
["-txbroadcastinterval=1",
"-txbroadcastrate=1", "-excessiveblocksize=2000000",
"-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)],
["-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)]
]
self.extra_args = args
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], self.nodes[1])
connect_nodes(self.nodes[1], self.nodes[2])
# Generate enough coins on the spending nodes
self.nodes[2].generate(20 + 100)
self.sync_all()
# Disconnect node 3 so that it doesn't broadcast the txs it creates
disconnect_nodes(self.nodes[1], self.nodes[2])
self.signedtxs = []
to = self.nodes[2].getnewaddress()
for i in range(self.options.samplesize):
txid = self.nodes[2].sendtoaddress(to, "0.00001", "comment", "comment_to", False, 2)
self.signedtxs.append(self.nodes[2].gettransaction(txid)['hex'])
def run_test(self):
inboundReceiver, outboundReceiver = InvReceiver(), InvReceiver()
self.nodes[0].add_p2p_connection(inboundReceiver)
self.nodes[1].add_p2p_connection(outboundReceiver)
for signextx in self.signedtxs:
self.nodes[0].sendrawtransaction(signextx, True)
wait_until(
lambda: len(inboundReceiver.invTimes) == self.options.samplesize,
lock=mininode_lock,
timeout=self.options.samplesize * self.options.interval / 1000 * 2)
wait_until(
lambda: len(outboundReceiver.invTimes) == self.options.samplesize,
lock=mininode_lock,
timeout=self.options.samplesize * self.options.interval / 1000)
inboundkstestresult = stats.kstest(inboundReceiver.invDelays, stats.expon(scale=self.scale).cdf)
outboundkstestresult = stats.kstest(outboundReceiver.invDelays, stats.expon(scale=self.scale / 2).cdf)
self.log.info("kstestresults for interval {}: inbound {}, outbound {}".format(
self.options.interval,
inboundkstestresult,
outboundkstestresult))
assert(inboundkstestresult.pvalue > self.options.alpha), inboundReceiver.invDelays
if self.options.testoutbound:
assert(outboundkstestresult.pvalue > self.options.alpha), outboundReceiver.invDelays
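# A minimal sketch of the acceptance criterion used in run_test() above, shown in
# isolation; the helper name _ks_accepts is illustrative and is not used by the test.
def _ks_accepts(delays, scale, alpha=0.001):
    """Return True if the delays are consistent with an exponential distribution
    of the given scale according to a one-sample Kolmogorov-Smirnov test."""
    return stats.kstest(delays, stats.expon(scale=scale).cdf).pvalue > alpha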
if __name__ == '__main__':
TxBroadcastIntervalTest().main()
| [((28, 22, 28, 33), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((101, 8, 101, 51), 'test_framework.util.connect_nodes', 'connect_nodes', ({(101, 22, 101, 35): 'self.nodes[0]', (101, 37, 101, 50): 'self.nodes[1]'}, {}), '(self.nodes[0], self.nodes[1])', False, 'from test_framework.util import wait_until, connect_nodes, disconnect_nodes\n'), ((102, 8, 102, 51), 'test_framework.util.connect_nodes', 'connect_nodes', ({(102, 22, 102, 35): 'self.nodes[1]', (102, 37, 102, 50): 'self.nodes[2]'}, {}), '(self.nodes[1], self.nodes[2])', False, 'from test_framework.util import wait_until, connect_nodes, disconnect_nodes\n'), ((108, 8, 108, 54), 'test_framework.util.disconnect_nodes', 'disconnect_nodes', ({(108, 25, 108, 38): 'self.nodes[1]', (108, 40, 108, 53): 'self.nodes[2]'}, {}), '(self.nodes[1], self.nodes[2])', False, 'from test_framework.util import wait_until, connect_nodes, disconnect_nodes\n'), ((132, 70, 132, 99), 'scipy.stats.expon', 'stats.expon', (), '', False, 'from scipy import stats\n'), ((133, 72, 133, 105), 'scipy.stats.expon', 'stats.expon', (), '', False, 'from scipy import stats\n')] |
buaaqt/dgl | tests/compute/test_sampler.py | 64f6f3c1a8c2c3e08ec0750b902f3e2c63fd2cd7 | import backend as F
import numpy as np
import scipy as sp
import dgl
from dgl import utils
import unittest
from numpy.testing import assert_array_equal
np.random.seed(42)
def generate_rand_graph(n):
arr = (sp.sparse.random(n, n, density=0.1, format='coo') != 0).astype(np.int64)
return dgl.DGLGraph(arr, readonly=True)
def test_create_full():
g = generate_rand_graph(100)
full_nf = dgl.contrib.sampling.sampler.create_full_nodeflow(g, 5)
assert full_nf.number_of_nodes() == g.number_of_nodes() * 6
assert full_nf.number_of_edges() == g.number_of_edges() * 5
def test_1neighbor_sampler_all():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 1, g.number_of_nodes(), neighbor_type='in', num_workers=4)):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
src, dst, eid = g.in_edges(seed_ids, form='all')
assert subg.number_of_nodes() == len(src) + 1
assert subg.number_of_edges() == len(src)
assert seed_ids == subg.layer_parent_nid(-1)
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
assert F.array_equal(child_src, subg.layer_nid(0))
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def is_sorted(arr):
return np.sum(np.sort(arr) == arr, 0) == len(arr)
def verify_subgraph(g, subg, seed_id):
seed_id = F.asnumpy(seed_id)
seeds = F.asnumpy(subg.map_to_parent_nid(subg.layer_nid(-1)))
assert seed_id in seeds
child_seed = F.asnumpy(subg.layer_nid(-1))[seeds == seed_id]
src, dst, eid = g.in_edges(seed_id, form='all')
child_src, child_dst, child_eid = subg.in_edges(child_seed, form='all')
child_src = F.asnumpy(child_src)
# We don't allow duplicate elements in the neighbor list.
assert(len(np.unique(child_src)) == len(child_src))
# The neighbor list also needs to be sorted.
assert(is_sorted(child_src))
# a neighbor in the subgraph must also exist in parent graph.
src = F.asnumpy(src)
for i in subg.map_to_parent_nid(child_src):
assert F.asnumpy(i) in src
def test_1neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_prefetch_neighbor_sampler():
g = generate_rand_graph(100)
# In this case, NeighborSampling simply gets the neighborhood of a single vertex.
for subg in dgl.contrib.sampling.NeighborSampler(g, 1, 5, neighbor_type='in',
num_workers=4, prefetch=True):
seed_ids = subg.layer_parent_nid(-1)
assert len(seed_ids) == 1
assert subg.number_of_nodes() <= 6
assert subg.number_of_edges() <= 5
verify_subgraph(g, subg, seed_ids)
def test_10neighbor_sampler_all():
g = generate_rand_graph(100)
    # In this case, NeighborSampler returns the full in-neighborhood of every seed vertex (10 seeds per batch).
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, g.number_of_nodes(),
neighbor_type='in', num_workers=4):
seed_ids = subg.layer_parent_nid(-1)
assert F.array_equal(seed_ids, subg.map_to_parent_nid(subg.layer_nid(-1)))
src, dst, eid = g.in_edges(seed_ids, form='all')
child_src, child_dst, child_eid = subg.in_edges(subg.layer_nid(-1), form='all')
src1 = subg.map_to_parent_nid(child_src)
assert F.array_equal(src1, src)
def check_10neighbor_sampler(g, seeds):
    # In this case, NeighborSampler draws up to 5 in-neighbors for every seed vertex (10 seeds per batch).
for subg in dgl.contrib.sampling.NeighborSampler(g, 10, 5, neighbor_type='in',
num_workers=4, seed_nodes=seeds):
seed_ids = subg.layer_parent_nid(-1)
assert subg.number_of_nodes() <= 6 * len(seed_ids)
assert subg.number_of_edges() <= 5 * len(seed_ids)
for seed_id in seed_ids:
verify_subgraph(g, subg, seed_id)
def test_10neighbor_sampler():
g = generate_rand_graph(100)
check_10neighbor_sampler(g, None)
check_10neighbor_sampler(g, seeds=np.unique(np.random.randint(0, g.number_of_nodes(),
size=int(g.number_of_nodes() / 10))))
def _test_layer_sampler(prefetch=False):
g = generate_rand_graph(100)
nid = g.nodes()
src, dst, eid = g.all_edges(form='all', order='eid')
n_batches = 5
batch_size = 50
seed_batches = [np.sort(np.random.choice(F.asnumpy(nid), batch_size, replace=False))
for i in range(n_batches)]
seed_nodes = np.hstack(seed_batches)
layer_sizes = [50] * 3
LayerSampler = getattr(dgl.contrib.sampling, 'LayerSampler')
sampler = LayerSampler(g, batch_size, layer_sizes, 'in',
seed_nodes=seed_nodes, num_workers=4, prefetch=prefetch)
for sub_g in sampler:
assert all(sub_g.layer_size(i) < size for i, size in enumerate(layer_sizes))
sub_nid = F.arange(0, sub_g.number_of_nodes())
assert all(np.all(np.isin(F.asnumpy(sub_g.layer_nid(i)), F.asnumpy(sub_nid)))
for i in range(sub_g.num_layers))
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_nid(sub_nid)),
F.asnumpy(nid)))
sub_eid = F.arange(0, sub_g.number_of_edges())
assert np.all(np.isin(F.asnumpy(sub_g.map_to_parent_eid(sub_eid)),
F.asnumpy(eid)))
assert any(np.all(np.sort(F.asnumpy(sub_g.layer_parent_nid(-1))) == seed_batch)
for seed_batch in seed_batches)
sub_src, sub_dst = sub_g.all_edges(order='eid')
for i in range(sub_g.num_blocks):
block_eid = sub_g.block_eid(i)
block_src = sub_g.map_to_parent_nid(F.gather_row(sub_src, block_eid))
block_dst = sub_g.map_to_parent_nid(F.gather_row(sub_dst, block_eid))
block_parent_eid = sub_g.block_parent_eid(i)
block_parent_src = F.gather_row(src, block_parent_eid)
block_parent_dst = F.gather_row(dst, block_parent_eid)
assert np.all(F.asnumpy(block_src == block_parent_src))
n_layers = sub_g.num_layers
sub_n = sub_g.number_of_nodes()
assert sum(F.shape(sub_g.layer_nid(i))[0] for i in range(n_layers)) == sub_n
n_blocks = sub_g.num_blocks
sub_m = sub_g.number_of_edges()
assert sum(F.shape(sub_g.block_eid(i))[0] for i in range(n_blocks)) == sub_m
def test_layer_sampler():
_test_layer_sampler()
_test_layer_sampler(prefetch=True)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="Error occured when multiprocessing")
def test_nonuniform_neighbor_sampler():
# Construct a graph with
# (1) A path (0, 1, ..., 99) with weight 1
# (2) A bunch of random edges with weight 0.
edges = []
for i in range(99):
edges.append((i, i + 1))
for i in range(1000):
edge = (np.random.randint(100), np.random.randint(100))
if edge not in edges:
edges.append(edge)
src, dst = zip(*edges)
g = dgl.DGLGraph()
g.add_nodes(100)
g.add_edges(src, dst)
g.readonly()
g.edata['w'] = F.cat([
F.ones((99,), F.float64, F.cpu()),
F.zeros((len(edges) - 99,), F.float64, F.cpu())], 0)
# Test 1-neighbor NodeFlow with 99 as target node.
# The generated NodeFlow should only contain node i on layer i.
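    # (With transition_prob='w' the zero-weight random edges are never sampled, so
    # every 1-neighbor hop from node i deterministically follows the path edge from i-1.)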
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'in', transition_prob='w', seed_nodes=[99])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == i
# Test the reverse direction
sampler = dgl.contrib.sampling.NeighborSampler(
g, 1, 1, 99, 'out', transition_prob='w', seed_nodes=[0])
nf = next(iter(sampler))
assert nf.num_layers == 100
for i in range(nf.num_layers):
assert nf.layer_size(i) == 1
assert F.asnumpy(nf.layer_parent_nid(i)[0]) == 99 - i
def test_setseed():
g = generate_rand_graph(100)
nids = []
dgl.random.seed(42)
for subg in dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1):
nids.append(
tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3)))
# reinitialize
dgl.random.seed(42)
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=1)):
item = tuple(tuple(F.asnumpy(subg.layer_parent_nid(i))) for i in range(3))
assert item == nids[i]
for i, subg in enumerate(dgl.contrib.sampling.NeighborSampler(
g, 5, 3, num_hops=2, neighbor_type='in', num_workers=4)):
pass
def check_head_tail(g):
lsrc, ldst, leid = g.all_edges(form='all', order='eid')
lsrc = np.unique(F.asnumpy(lsrc))
head_nid = np.unique(F.asnumpy(g.head_nid))
assert len(head_nid) == len(g.head_nid)
np.testing.assert_equal(lsrc, head_nid)
ldst = np.unique(F.asnumpy(ldst))
tail_nid = np.unique(F.asnumpy(g.tail_nid))
assert len(tail_nid) == len(g.tail_nid)
np.testing.assert_equal(tail_nid, ldst)
def check_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
etype = np.random.randint(0, 10, size=g.number_of_edges(), dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
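    # pos_map maps (destination, edge id) -> true source, so a sampled negative
    # head can later be recognized as coinciding with an existing positive edge.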
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Test the homogeneous graph.
batch_size = 50
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst)[i])
neg_e = int(F.asnumpy(neg_eid)[i])
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
# check replacement = True
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = False
# with reset = False (default setting)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
# check replacement = True
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# check replacement = False
# with reset = True
total_samples = 0
max_samples = 2 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) <= batch_size
total_samples += len(pos_leid)
if (total_samples >= max_samples):
break
assert total_samples >= max_samples
# Test the knowledge graph.
total_samples = 0
for _, neg_edges in EdgeSampler(g, batch_size,
negative_mode=mode,
reset=False,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
def check_weighted_negative_sampler(mode, exclude_positive, neg_size):
g = generate_rand_graph(100)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
etype = np.random.randint(0, 10, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
pos_gsrc, pos_gdst, pos_geid = g.all_edges(form='all', order='eid')
pos_map = {}
for i in range(len(pos_geid)):
pos_d = int(F.asnumpy(pos_gdst[i]))
pos_e = int(F.asnumpy(pos_geid[i]))
pos_map[(pos_d, pos_e)] = int(F.asnumpy(pos_gsrc[i]))
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 50
    # Test the homogeneous graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
pos_lsrc, pos_ldst, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert_array_equal(F.asnumpy(F.gather_row(pos_edges.parent_eid, pos_leid)),
F.asnumpy(g.edge_ids(F.gather_row(pos_edges.parent_nid, pos_lsrc),
F.gather_row(pos_edges.parent_nid, pos_ldst))))
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
for i in range(len(neg_eid)):
neg_d = int(F.asnumpy(neg_dst[i]))
neg_e = int(F.asnumpy(neg_eid[i]))
assert (neg_d, neg_e) in pos_map
if exclude_positive:
assert int(F.asnumpy(neg_src[i])) != pos_map[(neg_d, neg_e)]
check_head_tail(neg_edges)
pos_tails = F.gather_row(pos_edges.parent_nid, pos_edges.tail_nid)
neg_tails = F.gather_row(neg_edges.parent_nid, neg_edges.tail_nid)
pos_tails = np.sort(F.asnumpy(pos_tails))
neg_tails = np.sort(F.asnumpy(neg_tails))
np.testing.assert_equal(pos_tails, neg_tails)
exist = neg_edges.edata['false_neg']
if exclude_positive:
assert np.sum(F.asnumpy(exist) == 0) == len(exist)
else:
assert F.array_equal(g.has_edges_between(neg_src, neg_dst), exist)
total_samples += batch_size
assert total_samples <= num_edges
    # Test the knowledge graph with edge weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
    # Test the knowledge graph with edge/node weight provided.
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
neg_lsrc, neg_ldst, neg_leid = neg_edges.all_edges(form='all', order='eid')
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
neg_eid = F.gather_row(neg_edges.parent_eid, neg_leid)
exists = neg_edges.edata['false_neg']
neg_edges.edata['etype'] = F.gather_row(g.edata['etype'], neg_eid)
for i in range(len(neg_eid)):
u, v = F.asnumpy(neg_src[i]), F.asnumpy(neg_dst[i])
if g.has_edge_between(u, v):
eid = g.edge_id(u, v)
etype = g.edata['etype'][eid]
exist = neg_edges.edata['etype'][i] == etype
assert F.asnumpy(exists[i]) == F.asnumpy(exist)
total_samples += batch_size
assert total_samples <= num_edges
    # check replacement = True with non-uniform sampling of pos edges
# with reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=False,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
    # check replacement = True with non-uniform sampling of pos edges
# with reset = True
total_samples = 0
max_samples = 4 * num_edges
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
reset=True,
edge_weight=edge_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
    # check replacement = False with non-uniform sampling of pos/neg edges
# reset = False
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=False,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
assert total_samples == num_edges
    # check replacement = False with non-uniform sampling of pos/neg edges
# reset = True
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=False,
reset=True,
edge_weight=edge_weight,
node_weight=node_weight,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=exclude_positive,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
assert len(pos_leid) == batch_size
total_samples += len(pos_leid)
if total_samples >= max_samples:
break
assert total_samples == max_samples
# Check Rate
dgl.random.seed(0)
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
num_nodes = g.number_of_nodes()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[0] = F.sum(edge_weight, dim=0)
node_weight = F.copy_to(F.tensor(np.full((num_nodes,), 1, dtype=np.float32)), F.cpu())
node_weight[-1] = F.sum(node_weight, dim=0) / 200
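    # Edge 0 now carries as much weight as all the other edges combined (~1/2 of
    # the total edge mass) and the last node gets num_nodes/200 (= 5) times the
    # weight of a normal node; the rate assertions below check the empirical
    # sample counts against these shares.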
etype = np.random.randint(0, 20, size=num_edges, dtype=np.int64)
g.edata['etype'] = F.copy_to(F.tensor(etype), F.cpu())
# Test w/o node weight.
max_samples = num_edges // 5
total_samples = 0
    # Test the knowledge graph with edge weight provided.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = neg_edges.parent_nid[neg_lsrc]
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = neg_edges.parent_nid[neg_ldst]
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate_0 = node_sampled[0] / node_sampled.sum()
node_tail_half_cnt = node_sampled[node_sampled.shape[0] // 2:-1].sum()
node_rate_tail_half = node_tail_half_cnt / node_sampled.sum()
assert node_rate_0 < 0.02
assert np.allclose(node_rate_tail_half, 0.5, atol=0.02)
# Test the knowledge graph with edge/node weight provied.
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
node_sampled = np.full((num_nodes,), 0, dtype=np.int32)
total_samples = 0
for pos_edges, neg_edges in EdgeSampler(g, batch_size,
replacement=True,
edge_weight=edge_weight,
node_weight=node_weight,
shuffle=True,
negative_mode=mode,
neg_sample_size=neg_size,
exclude_positive=False,
relations=g.edata['etype'],
return_false_neg=True):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
neg_lsrc, neg_ldst, _ = neg_edges.all_edges(form='all', order='eid')
if 'head' in mode:
neg_src = F.gather_row(neg_edges.parent_nid, neg_lsrc)
np.add.at(node_sampled, F.asnumpy(neg_src), 1)
else:
neg_dst = F.gather_row(neg_edges.parent_nid, neg_ldst)
np.add.at(node_sampled, F.asnumpy(neg_dst), 1)
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
total_samples += batch_size
if total_samples > max_samples:
break
# Check rate here
edge_rate_0 = edge_sampled[0] / edge_sampled.sum()
edge_tail_half_cnt = edge_sampled[edge_sampled.shape[0] // 2:-1].sum()
edge_rate_tail_half = edge_tail_half_cnt / edge_sampled.sum()
assert np.allclose(edge_rate_0, 0.5, atol=0.05)
assert np.allclose(edge_rate_tail_half, 0.25, atol=0.05)
node_rate = node_sampled[-1] / node_sampled.sum()
node_rate_a = np.average(node_sampled[:50]) / node_sampled.sum()
node_rate_b = np.average(node_sampled[50:100]) / node_sampled.sum()
    # As negative sampling does not draw duplicate nodes,
    # this check allows some tolerance on the expected sample rate.
assert np.allclose(node_rate, node_rate_a * 5, atol=0.002)
assert np.allclose(node_rate_a, node_rate_b, atol=0.0002)
def check_positive_edge_sampler():
g = generate_rand_graph(1000)
num_edges = g.number_of_edges()
edge_weight = F.copy_to(F.tensor(np.full((num_edges,), 1, dtype=np.float32)), F.cpu())
edge_weight[num_edges-1] = num_edges ** 3
EdgeSampler = getattr(dgl.contrib.sampling, 'EdgeSampler')
# Correctness check
# Test the homogeneous graph.
batch_size = 128
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
edge_sampled = np.full((num_edges,), 0, dtype=np.int32)
for pos_edges in EdgeSampler(g, batch_size,
reset=False,
shuffle=True,
edge_weight=edge_weight):
_, _, pos_leid = pos_edges.all_edges(form='all', order='eid')
np.add.at(edge_sampled, F.asnumpy(pos_edges.parent_eid[pos_leid]), 1)
truth = np.full((num_edges,), 1, dtype=np.int32)
edge_sampled = edge_sampled[:num_edges]
assert np.array_equal(truth, edge_sampled)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support item assignment")
def test_negative_sampler():
check_negative_sampler('chunk-head', False, 10)
check_negative_sampler('head', True, 10)
check_negative_sampler('head', False, 10)
check_weighted_negative_sampler('chunk-head', False, 10)
check_weighted_negative_sampler('head', True, 10)
check_weighted_negative_sampler('head', False, 10)
check_positive_edge_sampler()
#disable this check for now. It might take too long time.
#check_negative_sampler('head', False, 100)
if __name__ == '__main__':
test_create_full()
test_1neighbor_sampler_all()
test_10neighbor_sampler_all()
test_1neighbor_sampler()
test_10neighbor_sampler()
test_layer_sampler()
test_nonuniform_neighbor_sampler()
test_setseed()
test_negative_sampler()
| [((9, 0, 9, 18), 'numpy.random.seed', 'np.random.seed', ({(9, 15, 9, 17): '(42)'}, {}), '(42)', True, 'import numpy as np\n'), ((161, 1, 161, 103), 'unittest.skipIf', 'unittest.skipIf', (), '', False, 'import unittest\n'), ((701, 1, 701, 103), 'unittest.skipIf', 'unittest.skipIf', (), '', False, 'import unittest\n'), ((13, 11, 13, 43), 'dgl.DGLGraph', 'dgl.DGLGraph', (), '', False, 'import dgl\n'), ((17, 14, 17, 69), 'dgl.contrib.sampling.sampler.create_full_nodeflow', 'dgl.contrib.sampling.sampler.create_full_nodeflow', ({(17, 64, 17, 65): 'g', (17, 67, 17, 68): '5'}, {}), '(g, 5)', False, 'import dgl\n'), ((43, 14, 43, 32), 'backend.asnumpy', 'F.asnumpy', ({(43, 24, 43, 31): 'seed_id'}, {}), '(seed_id)', True, 'import backend as F\n'), ((50, 16, 50, 36), 'backend.asnumpy', 'F.asnumpy', ({(50, 26, 50, 35): 'child_src'}, {}), '(child_src)', True, 'import backend as F\n'), ((57, 10, 57, 24), 'backend.asnumpy', 'F.asnumpy', ({(57, 20, 57, 23): 'src'}, {}), '(src)', True, 'import backend as F\n'), ((64, 16, 65, 67), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (), '', False, 'import dgl\n'), ((75, 16, 76, 82), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (), '', False, 'import dgl\n'), ((98, 16, 99, 85), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (), '', False, 'import dgl\n'), ((120, 17, 120, 40), 'numpy.hstack', 'np.hstack', ({(120, 27, 120, 39): 'seed_batches'}, {}), '(seed_batches)', True, 'import numpy as np\n'), ((174, 8, 174, 22), 'dgl.DGLGraph', 'dgl.DGLGraph', ({}, {}), '()', False, 'import dgl\n'), ((185, 14, 186, 64), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (), '', False, 'import dgl\n'), ((195, 14, 196, 64), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (), '', False, 'import dgl\n'), ((209, 4, 209, 23), 'dgl.random.seed', 'dgl.random.seed', ({(209, 20, 209, 22): '(42)'}, {}), '(42)', False, 'import dgl\n'), ((210, 16, 211, 67), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (), '', False, 'import dgl\n'), ((216, 4, 216, 23), 'dgl.random.seed', 'dgl.random.seed', ({(216, 20, 216, 22): '(42)'}, {}), '(42)', False, 'import dgl\n'), ((232, 4, 232, 43), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(232, 28, 232, 32): 'lsrc', (232, 34, 232, 42): 'head_nid'}, {}), '(lsrc, head_nid)', True, 'import numpy as np\n'), ((237, 4, 237, 43), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(237, 28, 237, 36): 'tail_nid', (237, 38, 237, 42): 'ldst'}, {}), '(tail_nid, ldst)', True, 'import numpy as np\n'), ((393, 12, 393, 68), 'numpy.random.randint', 'np.random.randint', (), '', True, 'import numpy as np\n'), ((574, 4, 574, 22), 'dgl.random.seed', 'dgl.random.seed', ({(574, 20, 574, 21): '(0)'}, {}), '(0)', False, 'import dgl\n'), ((579, 21, 579, 46), 'backend.sum', 'F.sum', (), '', True, 'import backend as F\n'), ((582, 12, 582, 68), 'numpy.random.randint', 'np.random.randint', (), '', True, 'import numpy as np\n'), ((589, 19, 589, 59), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((590, 19, 590, 59), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((617, 11, 617, 51), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((618, 11, 618, 60), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((624, 11, 624, 59), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), 
((627, 19, 627, 59), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((628, 19, 628, 59), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((657, 11, 657, 51), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((658, 11, 658, 60), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((665, 11, 665, 62), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((666, 11, 666, 61), 'numpy.allclose', 'np.allclose', (), '', True, 'import numpy as np\n'), ((679, 19, 679, 59), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((685, 12, 685, 52), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((687, 11, 687, 46), 'numpy.array_equal', 'np.array_equal', ({(687, 26, 687, 31): 'truth', (687, 33, 687, 45): 'edge_sampled'}, {}), '(truth, edge_sampled)', True, 'import numpy as np\n'), ((689, 19, 689, 59), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((696, 12, 696, 52), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((698, 11, 698, 46), 'numpy.array_equal', 'np.array_equal', ({(698, 26, 698, 31): 'truth', (698, 33, 698, 45): 'edge_sampled'}, {}), '(truth, edge_sampled)', True, 'import numpy as np\n'), ((37, 15, 37, 39), 'backend.array_equal', 'F.array_equal', ({(37, 29, 37, 33): 'src1', (37, 35, 37, 38): 'src'}, {}), '(src1, src)', True, 'import backend as F\n'), ((94, 15, 94, 39), 'backend.array_equal', 'F.array_equal', ({(94, 29, 94, 33): 'src1', (94, 35, 94, 38): 'src'}, {}), '(src1, src)', True, 'import backend as F\n'), ((217, 29, 218, 67), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (), '', False, 'import dgl\n'), ((222, 29, 223, 67), 'dgl.contrib.sampling.NeighborSampler', 'dgl.contrib.sampling.NeighborSampler', (), '', False, 'import dgl\n'), ((229, 21, 229, 36), 'backend.asnumpy', 'F.asnumpy', ({(229, 31, 229, 35): 'lsrc'}, {}), '(lsrc)', True, 'import backend as F\n'), ((230, 25, 230, 46), 'backend.asnumpy', 'F.asnumpy', ({(230, 35, 230, 45): 'g.head_nid'}, {}), '(g.head_nid)', True, 'import backend as F\n'), ((234, 21, 234, 36), 'backend.asnumpy', 'F.asnumpy', ({(234, 31, 234, 35): 'ldst'}, {}), '(ldst)', True, 'import backend as F\n'), ((235, 25, 235, 46), 'backend.asnumpy', 'F.asnumpy', ({(235, 35, 235, 45): 'g.tail_nid'}, {}), '(g.tail_nid)', True, 'import backend as F\n'), ((244, 33, 244, 48), 'backend.tensor', 'F.tensor', ({(244, 42, 244, 47): 'etype'}, {}), '(etype)', True, 'import backend as F\n'), ((244, 50, 244, 57), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((271, 18, 271, 62), 'backend.gather_row', 'F.gather_row', ({(271, 31, 271, 51): 'neg_edges.parent_nid', (271, 53, 271, 61): 'neg_lsrc'}, {}), '(neg_edges.parent_nid, neg_lsrc)', True, 'import backend as F\n'), ((272, 18, 272, 62), 'backend.gather_row', 'F.gather_row', ({(272, 31, 272, 51): 'neg_edges.parent_nid', (272, 53, 272, 61): 'neg_ldst'}, {}), '(neg_edges.parent_nid, neg_ldst)', True, 'import backend as F\n'), ((273, 18, 273, 62), 'backend.gather_row', 'F.gather_row', ({(273, 31, 273, 51): 'neg_edges.parent_eid', (273, 53, 273, 61): 'neg_leid'}, {}), '(neg_edges.parent_eid, neg_leid)', True, 'import backend as F\n'), ((282, 20, 282, 74), 'backend.gather_row', 'F.gather_row', ({(282, 33, 282, 53): 'pos_edges.parent_nid', (282, 55, 282, 73): 'pos_edges.tail_nid'}, {}), '(pos_edges.parent_nid, pos_edges.tail_nid)', True, 'import backend as F\n'), ((283, 20, 283, 74), 'backend.gather_row', 'F.gather_row', 
({(283, 33, 283, 53): 'neg_edges.parent_nid', (283, 55, 283, 73): 'neg_edges.tail_nid'}, {}), '(neg_edges.parent_nid, neg_edges.tail_nid)', True, 'import backend as F\n'), ((286, 8, 286, 53), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(286, 32, 286, 41): 'pos_tails', (286, 43, 286, 52): 'neg_tails'}, {}), '(pos_tails, neg_tails)', True, 'import numpy as np\n'), ((372, 18, 372, 62), 'backend.gather_row', 'F.gather_row', ({(372, 31, 372, 51): 'neg_edges.parent_nid', (372, 53, 372, 61): 'neg_lsrc'}, {}), '(neg_edges.parent_nid, neg_lsrc)', True, 'import backend as F\n'), ((373, 18, 373, 62), 'backend.gather_row', 'F.gather_row', ({(373, 31, 373, 51): 'neg_edges.parent_nid', (373, 53, 373, 61): 'neg_ldst'}, {}), '(neg_edges.parent_nid, neg_ldst)', True, 'import backend as F\n'), ((374, 18, 374, 62), 'backend.gather_row', 'F.gather_row', ({(374, 31, 374, 51): 'neg_edges.parent_eid', (374, 53, 374, 61): 'neg_leid'}, {}), '(neg_edges.parent_eid, neg_leid)', True, 'import backend as F\n'), ((376, 35, 376, 74), 'backend.gather_row', 'F.gather_row', ({(376, 48, 376, 64): "g.edata['etype']", (376, 66, 376, 73): 'neg_eid'}, {}), "(g.edata['etype'], neg_eid)", True, 'import backend as F\n'), ((391, 82, 391, 89), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((392, 82, 392, 89), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((394, 33, 394, 48), 'backend.tensor', 'F.tensor', ({(394, 42, 394, 47): 'etype'}, {}), '(etype)', True, 'import backend as F\n'), ((394, 50, 394, 57), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((422, 18, 422, 62), 'backend.gather_row', 'F.gather_row', ({(422, 31, 422, 51): 'neg_edges.parent_nid', (422, 53, 422, 61): 'neg_lsrc'}, {}), '(neg_edges.parent_nid, neg_lsrc)', True, 'import backend as F\n'), ((423, 18, 423, 62), 'backend.gather_row', 'F.gather_row', ({(423, 31, 423, 51): 'neg_edges.parent_nid', (423, 53, 423, 61): 'neg_ldst'}, {}), '(neg_edges.parent_nid, neg_ldst)', True, 'import backend as F\n'), ((424, 18, 424, 62), 'backend.gather_row', 'F.gather_row', ({(424, 31, 424, 51): 'neg_edges.parent_eid', (424, 53, 424, 61): 'neg_leid'}, {}), '(neg_edges.parent_eid, neg_leid)', True, 'import backend as F\n'), ((433, 20, 433, 74), 'backend.gather_row', 'F.gather_row', ({(433, 33, 433, 53): 'pos_edges.parent_nid', (433, 55, 433, 73): 'pos_edges.tail_nid'}, {}), '(pos_edges.parent_nid, pos_edges.tail_nid)', True, 'import backend as F\n'), ((434, 20, 434, 74), 'backend.gather_row', 'F.gather_row', ({(434, 33, 434, 53): 'neg_edges.parent_nid', (434, 55, 434, 73): 'neg_edges.tail_nid'}, {}), '(neg_edges.parent_nid, neg_edges.tail_nid)', True, 'import backend as F\n'), ((437, 8, 437, 53), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(437, 32, 437, 41): 'pos_tails', (437, 43, 437, 52): 'neg_tails'}, {}), '(pos_tails, neg_tails)', True, 'import numpy as np\n'), ((458, 18, 458, 62), 'backend.gather_row', 'F.gather_row', ({(458, 31, 458, 51): 'neg_edges.parent_nid', (458, 53, 458, 61): 'neg_lsrc'}, {}), '(neg_edges.parent_nid, neg_lsrc)', True, 'import backend as F\n'), ((459, 18, 459, 62), 'backend.gather_row', 'F.gather_row', ({(459, 31, 459, 51): 'neg_edges.parent_nid', (459, 53, 459, 61): 'neg_ldst'}, {}), '(neg_edges.parent_nid, neg_ldst)', True, 'import backend as F\n'), ((460, 18, 460, 62), 'backend.gather_row', 'F.gather_row', ({(460, 31, 460, 51): 'neg_edges.parent_eid', (460, 53, 460, 61): 'neg_leid'}, {}), '(neg_edges.parent_eid, neg_leid)', True, 'import backend as 
F\n'), ((462, 35, 462, 74), 'backend.gather_row', 'F.gather_row', ({(462, 48, 462, 64): "g.edata['etype']", (462, 66, 462, 73): 'neg_eid'}, {}), "(g.edata['etype'], neg_eid)", True, 'import backend as F\n'), ((485, 18, 485, 62), 'backend.gather_row', 'F.gather_row', ({(485, 31, 485, 51): 'neg_edges.parent_nid', (485, 53, 485, 61): 'neg_lsrc'}, {}), '(neg_edges.parent_nid, neg_lsrc)', True, 'import backend as F\n'), ((486, 18, 486, 62), 'backend.gather_row', 'F.gather_row', ({(486, 31, 486, 51): 'neg_edges.parent_nid', (486, 53, 486, 61): 'neg_ldst'}, {}), '(neg_edges.parent_nid, neg_ldst)', True, 'import backend as F\n'), ((487, 18, 487, 62), 'backend.gather_row', 'F.gather_row', ({(487, 31, 487, 51): 'neg_edges.parent_eid', (487, 53, 487, 61): 'neg_leid'}, {}), '(neg_edges.parent_eid, neg_leid)', True, 'import backend as F\n'), ((489, 35, 489, 74), 'backend.gather_row', 'F.gather_row', ({(489, 48, 489, 64): "g.edata['etype']", (489, 66, 489, 73): 'neg_eid'}, {}), "(g.edata['etype'], neg_eid)", True, 'import backend as F\n'), ((578, 82, 578, 89), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((580, 82, 580, 89), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((581, 22, 581, 47), 'backend.sum', 'F.sum', (), '', True, 'import backend as F\n'), ((583, 33, 583, 48), 'backend.tensor', 'F.tensor', ({(583, 42, 583, 47): 'etype'}, {}), '(etype)', True, 'import backend as F\n'), ((583, 50, 583, 57), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((661, 18, 661, 47), 'numpy.average', 'np.average', ({(661, 29, 661, 46): 'node_sampled[:50]'}, {}), '(node_sampled[:50])', True, 'import numpy as np\n'), ((662, 18, 662, 50), 'numpy.average', 'np.average', ({(662, 29, 662, 49): 'node_sampled[50:100]'}, {}), '(node_sampled[50:100])', True, 'import numpy as np\n'), ((671, 82, 671, 89), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((52, 15, 52, 35), 'numpy.unique', 'np.unique', ({(52, 25, 52, 34): 'child_src'}, {}), '(child_src)', True, 'import numpy as np\n'), ((59, 15, 59, 27), 'backend.asnumpy', 'F.asnumpy', ({(59, 25, 59, 26): 'i'}, {}), '(i)', True, 'import backend as F\n'), ((145, 31, 145, 66), 'backend.gather_row', 'F.gather_row', ({(145, 44, 145, 47): 'src', (145, 49, 145, 65): 'block_parent_eid'}, {}), '(src, block_parent_eid)', True, 'import backend as F\n'), ((146, 31, 146, 66), 'backend.gather_row', 'F.gather_row', ({(146, 44, 146, 47): 'dst', (146, 49, 146, 65): 'block_parent_eid'}, {}), '(dst, block_parent_eid)', True, 'import backend as F\n'), ((170, 16, 170, 38), 'numpy.random.randint', 'np.random.randint', ({(170, 34, 170, 37): '(100)'}, {}), '(100)', True, 'import numpy as np\n'), ((170, 40, 170, 62), 'numpy.random.randint', 'np.random.randint', ({(170, 58, 170, 61): '(100)'}, {}), '(100)', True, 'import numpy as np\n'), ((249, 20, 249, 42), 'backend.asnumpy', 'F.asnumpy', ({(249, 30, 249, 41): 'pos_gdst[i]'}, {}), '(pos_gdst[i])', True, 'import backend as F\n'), ((250, 20, 250, 42), 'backend.asnumpy', 'F.asnumpy', ({(250, 30, 250, 41): 'pos_geid[i]'}, {}), '(pos_geid[i])', True, 'import backend as F\n'), ((251, 38, 251, 60), 'backend.asnumpy', 'F.asnumpy', ({(251, 48, 251, 59): 'pos_gsrc[i]'}, {}), '(pos_gsrc[i])', True, 'import backend as F\n'), ((284, 28, 284, 48), 'backend.asnumpy', 'F.asnumpy', ({(284, 38, 284, 47): 'pos_tails'}, {}), '(pos_tails)', True, 'import backend as F\n'), ((285, 28, 285, 48), 'backend.asnumpy', 'F.asnumpy', ({(285, 38, 285, 47): 'neg_tails'}, {}), 
'(neg_tails)', True, 'import backend as F\n'), ((391, 37, 391, 79), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((392, 37, 392, 79), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((399, 20, 399, 42), 'backend.asnumpy', 'F.asnumpy', ({(399, 30, 399, 41): 'pos_gdst[i]'}, {}), '(pos_gdst[i])', True, 'import backend as F\n'), ((400, 20, 400, 42), 'backend.asnumpy', 'F.asnumpy', ({(400, 30, 400, 41): 'pos_geid[i]'}, {}), '(pos_geid[i])', True, 'import backend as F\n'), ((401, 38, 401, 60), 'backend.asnumpy', 'F.asnumpy', ({(401, 48, 401, 59): 'pos_gsrc[i]'}, {}), '(pos_gsrc[i])', True, 'import backend as F\n'), ((435, 28, 435, 48), 'backend.asnumpy', 'F.asnumpy', ({(435, 38, 435, 47): 'pos_tails'}, {}), '(pos_tails)', True, 'import backend as F\n'), ((436, 28, 436, 48), 'backend.asnumpy', 'F.asnumpy', ({(436, 38, 436, 47): 'neg_tails'}, {}), '(neg_tails)', True, 'import backend as F\n'), ((578, 37, 578, 79), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((580, 37, 580, 79), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((608, 32, 608, 73), 'backend.asnumpy', 'F.asnumpy', ({(608, 42, 608, 72): 'pos_edges.parent_eid[pos_leid]'}, {}), '(pos_edges.parent_eid[pos_leid])', True, 'import backend as F\n'), ((643, 22, 643, 66), 'backend.gather_row', 'F.gather_row', ({(643, 35, 643, 55): 'neg_edges.parent_nid', (643, 57, 643, 65): 'neg_lsrc'}, {}), '(neg_edges.parent_nid, neg_lsrc)', True, 'import backend as F\n'), ((646, 22, 646, 66), 'backend.gather_row', 'F.gather_row', ({(646, 35, 646, 55): 'neg_edges.parent_nid', (646, 57, 646, 65): 'neg_ldst'}, {}), '(neg_edges.parent_nid, neg_ldst)', True, 'import backend as F\n'), ((648, 32, 648, 73), 'backend.asnumpy', 'F.asnumpy', ({(648, 42, 648, 72): 'pos_edges.parent_eid[pos_leid]'}, {}), '(pos_edges.parent_eid[pos_leid])', True, 'import backend as F\n'), ((671, 37, 671, 79), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((684, 32, 684, 73), 'backend.asnumpy', 'F.asnumpy', ({(684, 42, 684, 72): 'pos_edges.parent_eid[pos_leid]'}, {}), '(pos_edges.parent_eid[pos_leid])', True, 'import backend as F\n'), ((695, 32, 695, 73), 'backend.asnumpy', 'F.asnumpy', ({(695, 42, 695, 72): 'pos_edges.parent_eid[pos_leid]'}, {}), '(pos_edges.parent_eid[pos_leid])', True, 'import backend as F\n'), ((12, 11, 12, 60), 'scipy.sparse.random', 'sp.sparse.random', (), '', True, 'import scipy as sp\n'), ((40, 18, 40, 30), 'numpy.sort', 'np.sort', ({(40, 26, 40, 29): 'arr'}, {}), '(arr)', True, 'import numpy as np\n'), ((118, 45, 118, 59), 'backend.asnumpy', 'F.asnumpy', ({(118, 55, 118, 58): 'nid'}, {}), '(nid)', True, 'import backend as F\n'), ((131, 30, 131, 44), 'backend.asnumpy', 'F.asnumpy', ({(131, 40, 131, 43): 'nid'}, {}), '(nid)', True, 'import backend as F\n'), ((134, 30, 134, 44), 'backend.asnumpy', 'F.asnumpy', ({(134, 40, 134, 43): 'eid'}, {}), '(eid)', True, 'import backend as F\n'), ((141, 48, 141, 80), 'backend.gather_row', 'F.gather_row', ({(141, 61, 141, 68): 'sub_src', (141, 70, 141, 79): 'block_eid'}, {}), '(sub_src, block_eid)', True, 'import backend as F\n'), ((142, 48, 142, 80), 'backend.gather_row', 'F.gather_row', ({(142, 61, 142, 68): 'sub_dst', (142, 70, 142, 79): 'block_eid'}, {}), '(sub_dst, block_eid)', True, 'import backend as F\n'), ((148, 26, 148, 66), 'backend.asnumpy', 'F.asnumpy', ({(148, 36, 148, 65): '(block_src == block_parent_src)'}, {}), '(block_src == block_parent_src)', True, 'import backend as F\n'), ((180, 33, 180, 40), 'backend.cpu', 'F.cpu', ({}, 
{}), '()', True, 'import backend as F\n'), ((181, 47, 181, 54), 'backend.cpu', 'F.cpu', ({}, {}), '()', True, 'import backend as F\n'), ((265, 37, 265, 81), 'backend.gather_row', 'F.gather_row', ({(265, 50, 265, 70): 'pos_edges.parent_eid', (265, 72, 265, 80): 'pos_leid'}, {}), '(pos_edges.parent_eid, pos_leid)', True, 'import backend as F\n'), ((378, 19, 378, 40), 'backend.asnumpy', 'F.asnumpy', ({(378, 29, 378, 39): 'neg_src[i]'}, {}), '(neg_src[i])', True, 'import backend as F\n'), ((378, 42, 378, 63), 'backend.asnumpy', 'F.asnumpy', ({(378, 52, 378, 62): 'neg_dst[i]'}, {}), '(neg_dst[i])', True, 'import backend as F\n'), ((417, 37, 417, 81), 'backend.gather_row', 'F.gather_row', ({(417, 50, 417, 70): 'pos_edges.parent_eid', (417, 72, 417, 80): 'pos_leid'}, {}), '(pos_edges.parent_eid, pos_leid)', True, 'import backend as F\n'), ((426, 24, 426, 45), 'backend.asnumpy', 'F.asnumpy', ({(426, 34, 426, 44): 'neg_dst[i]'}, {}), '(neg_dst[i])', True, 'import backend as F\n'), ((427, 24, 427, 45), 'backend.asnumpy', 'F.asnumpy', ({(427, 34, 427, 44): 'neg_eid[i]'}, {}), '(neg_eid[i])', True, 'import backend as F\n'), ((464, 19, 464, 40), 'backend.asnumpy', 'F.asnumpy', ({(464, 29, 464, 39): 'neg_src[i]'}, {}), '(neg_src[i])', True, 'import backend as F\n'), ((464, 42, 464, 63), 'backend.asnumpy', 'F.asnumpy', ({(464, 52, 464, 62): 'neg_dst[i]'}, {}), '(neg_dst[i])', True, 'import backend as F\n'), ((491, 19, 491, 40), 'backend.asnumpy', 'F.asnumpy', ({(491, 29, 491, 39): 'neg_src[i]'}, {}), '(neg_src[i])', True, 'import backend as F\n'), ((491, 42, 491, 63), 'backend.asnumpy', 'F.asnumpy', ({(491, 52, 491, 62): 'neg_dst[i]'}, {}), '(neg_dst[i])', True, 'import backend as F\n'), ((604, 36, 604, 54), 'backend.asnumpy', 'F.asnumpy', ({(604, 46, 604, 53): 'neg_src'}, {}), '(neg_src)', True, 'import backend as F\n'), ((607, 36, 607, 54), 'backend.asnumpy', 'F.asnumpy', ({(607, 46, 607, 53): 'neg_dst'}, {}), '(neg_dst)', True, 'import backend as F\n'), ((644, 36, 644, 54), 'backend.asnumpy', 'F.asnumpy', ({(644, 46, 644, 53): 'neg_src'}, {}), '(neg_src)', True, 'import backend as F\n'), ((647, 36, 647, 54), 'backend.asnumpy', 'F.asnumpy', ({(647, 46, 647, 53): 'neg_dst'}, {}), '(neg_dst)', True, 'import backend as F\n'), ((266, 48, 266, 92), 'backend.gather_row', 'F.gather_row', ({(266, 61, 266, 81): 'pos_edges.parent_nid', (266, 83, 266, 91): 'pos_lsrc'}, {}), '(pos_edges.parent_nid, pos_lsrc)', True, 'import backend as F\n'), ((267, 48, 267, 92), 'backend.gather_row', 'F.gather_row', ({(267, 61, 267, 81): 'pos_edges.parent_nid', (267, 83, 267, 91): 'pos_ldst'}, {}), '(pos_edges.parent_nid, pos_ldst)', True, 'import backend as F\n'), ((275, 24, 275, 42), 'backend.asnumpy', 'F.asnumpy', ({(275, 34, 275, 41): 'neg_dst'}, {}), '(neg_dst)', True, 'import backend as F\n'), ((276, 24, 276, 42), 'backend.asnumpy', 'F.asnumpy', ({(276, 34, 276, 41): 'neg_eid'}, {}), '(neg_eid)', True, 'import backend as F\n'), ((383, 23, 383, 43), 'backend.asnumpy', 'F.asnumpy', ({(383, 33, 383, 42): 'exists[i]'}, {}), '(exists[i])', True, 'import backend as F\n'), ((383, 47, 383, 63), 'backend.asnumpy', 'F.asnumpy', ({(383, 57, 383, 62): 'exist'}, {}), '(exist)', True, 'import backend as F\n'), ((418, 48, 418, 92), 'backend.gather_row', 'F.gather_row', ({(418, 61, 418, 81): 'pos_edges.parent_nid', (418, 83, 418, 91): 'pos_lsrc'}, {}), '(pos_edges.parent_nid, pos_lsrc)', True, 'import backend as F\n'), ((419, 48, 419, 92), 'backend.gather_row', 'F.gather_row', ({(419, 61, 419, 81): 'pos_edges.parent_nid', (419, 83, 419, 
91): 'pos_ldst'}, {}), '(pos_edges.parent_nid, pos_ldst)', True, 'import backend as F\n'), ((469, 23, 469, 43), 'backend.asnumpy', 'F.asnumpy', ({(469, 33, 469, 42): 'exists[i]'}, {}), '(exists[i])', True, 'import backend as F\n'), ((469, 47, 469, 63), 'backend.asnumpy', 'F.asnumpy', ({(469, 57, 469, 62): 'exist'}, {}), '(exist)', True, 'import backend as F\n'), ((496, 23, 496, 43), 'backend.asnumpy', 'F.asnumpy', ({(496, 33, 496, 42): 'exists[i]'}, {}), '(exists[i])', True, 'import backend as F\n'), ((496, 47, 496, 63), 'backend.asnumpy', 'F.asnumpy', ({(496, 57, 496, 62): 'exist'}, {}), '(exist)', True, 'import backend as F\n'), ((128, 65, 128, 83), 'backend.asnumpy', 'F.asnumpy', ({(128, 75, 128, 82): 'sub_nid'}, {}), '(sub_nid)', True, 'import backend as F\n'), ((279, 27, 279, 48), 'backend.asnumpy', 'F.asnumpy', ({(279, 37, 279, 47): 'neg_src[i]'}, {}), '(neg_src[i])', True, 'import backend as F\n'), ((290, 26, 290, 42), 'backend.asnumpy', 'F.asnumpy', ({(290, 36, 290, 41): 'exist'}, {}), '(exist)', True, 'import backend as F\n'), ((430, 27, 430, 48), 'backend.asnumpy', 'F.asnumpy', ({(430, 37, 430, 47): 'neg_src[i]'}, {}), '(neg_src[i])', True, 'import backend as F\n'), ((441, 26, 441, 42), 'backend.asnumpy', 'F.asnumpy', ({(441, 36, 441, 41): 'exist'}, {}), '(exist)', True, 'import backend as F\n')] |
srinivasreddych/aws-orbit-workbench | plugins/voila/voila/__init__.py | 2d154addff58d26f5459a73c06148aaf5e9fad46 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, Optional
import aws_orbit
from aws_orbit.plugins import hooks
from aws_orbit.remote_files import helm
if TYPE_CHECKING:
from aws_orbit.models.context import Context, TeamContext
_logger: logging.Logger = logging.getLogger("aws_orbit")
CHART_PATH = os.path.join(os.path.dirname(__file__))
@hooks.deploy
def deploy(
plugin_id: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
_logger.debug("Team Env name: %s | Team name: %s", context.name, team_context.name)
plugin_id = plugin_id.replace("_", "-")
_logger.debug("plugin_id: %s", plugin_id)
chart_path = helm.create_team_charts_copy(team_context=team_context, path=CHART_PATH, target_path=plugin_id)
vars: Dict[str, Optional[str]] = dict(
team=team_context.name,
region=context.region,
account_id=context.account_id,
env_name=context.name,
restart_policy=parameters["restartPolicy"] if "restartPolicy" in parameters else "Always",
path=parameters["path"] if "path" in parameters else "/home/jovyan/shared/voila",
options=parameters["options"] if "options" in parameters else "",
plugin_id=plugin_id,
toolkit_s3_bucket=context.toolkit.s3_bucket,
image_pull_policy="Always" if aws_orbit.__version__.endswith(".dev0") else "IfNotPresent",
image=parameters["image"] if "image" in parameters else team_context.final_image_address,
sts_ep="legacy" if context.networking.data.internet_accessible else "regional",
)
repo_location = team_context.team_helm_repository
if repo_location:
repo = team_context.name
helm.add_repo(repo=repo, repo_location=repo_location)
chart_name, chart_version, chart_package = helm.package_chart(repo=repo, chart_path=chart_path, values=vars)
helm.install_chart(
repo=repo,
namespace=team_context.name,
name=f"{team_context.name}-{plugin_id}",
chart_name=chart_name,
chart_version=chart_version,
)
@hooks.destroy
def destroy(
plugin_id: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
_logger.debug(
"Delete Plugin %s of Team Env name: %s | Team name: %s",
plugin_id,
context.name,
team_context.name,
)
helm.uninstall_chart(f"{team_context.name}-{plugin_id}", namespace=team_context.name)
| [((25, 26, 25, 56), 'logging.getLogger', 'logging.getLogger', ({(25, 44, 25, 55): '"""aws_orbit"""'}, {}), "('aws_orbit')", False, 'import logging\n'), ((26, 26, 26, 51), 'os.path.dirname', 'os.path.dirname', ({(26, 42, 26, 50): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((39, 17, 39, 112), 'aws_orbit.remote_files.helm.create_team_charts_copy', 'helm.create_team_charts_copy', (), '', False, 'from aws_orbit.remote_files import helm\n'), ((83, 4, 83, 89), 'aws_orbit.remote_files.helm.uninstall_chart', 'helm.uninstall_chart', (), '', False, 'from aws_orbit.remote_files import helm\n'), ((59, 8, 59, 61), 'aws_orbit.remote_files.helm.add_repo', 'helm.add_repo', (), '', False, 'from aws_orbit.remote_files import helm\n'), ((60, 51, 60, 116), 'aws_orbit.remote_files.helm.package_chart', 'helm.package_chart', (), '', False, 'from aws_orbit.remote_files import helm\n'), ((61, 8, 67, 9), 'aws_orbit.remote_files.helm.install_chart', 'helm.install_chart', (), '', False, 'from aws_orbit.remote_files import helm\n'), ((51, 38, 51, 77), 'aws_orbit.__version__.endswith', 'aws_orbit.__version__.endswith', ({(51, 69, 51, 76): '""".dev0"""'}, {}), "('.dev0')", False, 'import aws_orbit\n')] |
aarunsai81/netapp | tools/generate_driver_list.py | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | #! /usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generate list of cinder drivers"""
import argparse
import os
from cinder.interface import util
parser = argparse.ArgumentParser(prog="generate_driver_list")
parser.add_argument("--format", default='str', choices=['str', 'dict'],
help="Output format type")
# Keep backwards compatibility with the gate-docs test
# The tests pass ['docs'] on the cmdln, but it's never been used.
parser.add_argument("output_list", default=None, nargs='?')
CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/"
class Output(object):
def __init__(self, base_dir, output_list):
# At this point we don't care what was passed in, just a trigger
# to write this out to the doc tree for now
self.driver_file = None
if output_list:
self.driver_file = open(
'%s/doc/source/drivers.rst' % base_dir, 'w+')
self.driver_file.write('===================\n')
self.driver_file.write('Available Drivers\n')
self.driver_file.write('===================\n\n')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.driver_file:
self.driver_file.close()
def write(self, text):
if self.driver_file:
self.driver_file.write('%s\n' % text)
else:
print(text)
def format_description(desc, output):
desc = desc or '<None>'
lines = desc.rstrip('\n').split('\n')
for line in lines:
output.write(' %s' % line)
def print_drivers(drivers, config_name, output):
for driver in sorted(drivers, key=lambda x: x.class_fqn):
output.write(driver.class_name)
output.write('-' * len(driver.class_name))
if driver.version:
output.write('* Version: %s' % driver.version)
output.write('* %s=%s' % (config_name, driver.class_fqn))
if driver.ci_wiki_name:
output.write('* CI info: %s%s' % (CI_WIKI_ROOT,
driver.ci_wiki_name))
output.write('* Description:')
format_description(driver.desc, output)
output.write('')
output.write('')
def output_str(cinder_root, args):
with Output(cinder_root, args.output_list) as output:
output.write('Volume Drivers')
output.write('==============')
print_drivers(util.get_volume_drivers(), 'volume_driver', output)
output.write('Backup Drivers')
output.write('==============')
print_drivers(util.get_backup_drivers(), 'backup_driver', output)
output.write('FC Zone Manager Drivers')
output.write('=======================')
print_drivers(util.get_fczm_drivers(), 'zone_driver', output)
def collect_driver_info(driver):
"""Build the dictionary that describes this driver."""
info = {'name': driver.class_name,
'version': driver.version,
'fqn': driver.class_fqn,
'description': driver.desc,
'ci_wiki_name': driver.ci_wiki_name}
return info
def output_dict():
import pprint
driver_list = []
drivers = util.get_volume_drivers()
for driver in drivers:
driver_list.append(collect_driver_info(driver))
pprint.pprint(driver_list)
def main():
tools_dir = os.path.dirname(os.path.abspath(__file__))
cinder_root = os.path.dirname(tools_dir)
cur_dir = os.getcwd()
os.chdir(cinder_root)
args = parser.parse_args()
try:
if args.format == 'str':
output_str(cinder_root, args)
elif args.format == 'dict':
output_dict()
finally:
os.chdir(cur_dir)
if __name__ == '__main__':
main()
| [((23, 9, 23, 61), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((116, 14, 116, 39), 'cinder.interface.util.get_volume_drivers', 'util.get_volume_drivers', ({}, {}), '()', False, 'from cinder.interface import util\n'), ((120, 4, 120, 30), 'pprint.pprint', 'pprint.pprint', ({(120, 18, 120, 29): 'driver_list'}, {}), '(driver_list)', False, 'import pprint\n'), ((125, 18, 125, 44), 'os.path.dirname', 'os.path.dirname', ({(125, 34, 125, 43): 'tools_dir'}, {}), '(tools_dir)', False, 'import os\n'), ((126, 14, 126, 25), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((127, 4, 127, 25), 'os.chdir', 'os.chdir', ({(127, 13, 127, 24): 'cinder_root'}, {}), '(cinder_root)', False, 'import os\n'), ((124, 32, 124, 57), 'os.path.abspath', 'os.path.abspath', ({(124, 48, 124, 56): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((137, 8, 137, 25), 'os.chdir', 'os.chdir', ({(137, 17, 137, 24): 'cur_dir'}, {}), '(cur_dir)', False, 'import os\n'), ((89, 22, 89, 47), 'cinder.interface.util.get_volume_drivers', 'util.get_volume_drivers', ({}, {}), '()', False, 'from cinder.interface import util\n'), ((93, 22, 93, 47), 'cinder.interface.util.get_backup_drivers', 'util.get_backup_drivers', ({}, {}), '()', False, 'from cinder.interface import util\n'), ((97, 22, 97, 45), 'cinder.interface.util.get_fczm_drivers', 'util.get_fczm_drivers', ({}, {}), '()', False, 'from cinder.interface import util\n')] |
maniegley/python | Disp_pythonScript.py | 0e3a98cbff910cc78b2c0386a9cca6c5bb20eefc | import sys
f = open("/home/vader/Desktop/test.py", "r")
# read the entire file
python_script = f.read()
print(python_script)
| [] |
grussr/email-file-attachment | email_file.py | afa65b679b3c88b419643e216b9942fdefeaf9fc | import smtplib
import argparse
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import configparser
import json
def send_mail(send_from, send_to, subject, text, files=None,
server="127.0.0.1", use_ssl=False, username=None, password=None):
assert isinstance(send_to, list)
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(MIMEText(text))
for f in files or []:
with open(f, "rb") as fil:
part = MIMEApplication(
fil.read(),
Name=basename(f)
)
# After the file is closed
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(f)
msg.attach(part)
    print(server)
if use_ssl == True:
smtp = smtplib.SMTP_SSL(server)
else:
smtp = smtplib.SMTP(server)
if username != None and username != '':
smtp.login(username, password)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
parser = argparse.ArgumentParser()
parser.add_argument('attachment')
args = parser.parse_args()
attachpath = args.attachment
config = configparser.ConfigParser()
config.read('email_file.ini')
email_from = config['DEFAULT']['From']
email_to_list = json.loads(config['DEFAULT']['To'])
email_subject = config['DEFAULT']['Subject']
email_body = config['DEFAULT']['Body']
email_server = config['DEFAULT']['Server']
email_server_ssl = bool(config['DEFAULT']['Server_SSL'])
email_server_username = config['DEFAULT']['Server_Username']
email_server_password = config['DEFAULT']['Server_Password']
send_mail(email_from, email_to_list, email_subject, email_body, [attachpath], email_server, email_server_ssl, email_server_username, email_server_password)
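# Editor's illustrative sketch (not part of the original script): a plausible
# email_file.ini layout inferred from the keys read above; all values are
# placeholders. Note that "To" must be a JSON list because it is parsed with
# json.loads().
#
#     [DEFAULT]
#     From = sender@example.com
#     To = ["recipient@example.com"]
#     Subject = Nightly report
#     Body = Please find the file attached.
#     Server = smtp.example.com
#     Server_SSL = True
#     Server_Username = smtp-user
#     Server_Password = smtp-password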
| [] |
gonzatorte/sw-utils | logs/constants.py | 767ec4aa8cbe1e0143f601482024ba1d9b76da64 | import logging
TRACE_LVL = int( (logging.DEBUG + logging.INFO) / 2 )
| [] |
dbatten5/dagster | examples/simple_lakehouse/simple_lakehouse/repo.py | d76e50295054ffe5a72f9b292ef57febae499528 | from dagster import repository
from simple_lakehouse.pipelines import simple_lakehouse_pipeline
@repository
def simple_lakehouse():
return [simple_lakehouse_pipeline]
| [] |
steingabelgaard/reportlab | demos/odyssey/dodyssey.py | b9a537e8386fb4b4b80e9ec89e0cdf392dbd6f61 | #Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__=''
#REPORTLAB_TEST_SCRIPT
import sys, copy, os
from reportlab.platypus import *
_NEW_PARA=os.environ.get('NEW_PARA','0')[0] in ('y','Y','1')
_REDCAP=int(os.environ.get('REDCAP','0'))
_CALLBACK=os.environ.get('CALLBACK','0')[0] in ('y','Y','1')
if _NEW_PARA:
def Paragraph(s,style):
from rlextra.radxml.para import Paragraph as PPPP
return PPPP(s,style)
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
import reportlab.rl_config
reportlab.rl_config.invariant = 1
styles = getSampleStyleSheet()
Title = "The Odyssey"
Author = "Homer"
def myTitlePage(canvas, doc):
canvas.saveState()
canvas.restoreState()
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d" % doc.page)
canvas.restoreState()
def go():
def myCanvasMaker(fn,**kw):
from reportlab.pdfgen.canvas import Canvas
canv = Canvas(fn,**kw)
# attach our callback to the canvas
canv.myOnDrawCB = myOnDrawCB
return canv
doc = BaseDocTemplate('dodyssey.pdf',showBoundary=0)
#normal frame as for SimpleFlowDocument
frameT = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height, id='normal')
#Two Columns
frame1 = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='col1')
frame2 = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
doc.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=myTitlePage),
PageTemplate(id='OneCol',frames=frameT, onPage=myLaterPages),
PageTemplate(id='TwoCol',frames=[frame1,frame2], onPage=myLaterPages),
])
doc.build(Elements,canvasmaker=myCanvasMaker)
Elements = []
ChapterStyle = copy.deepcopy(styles["Heading1"])
ChapterStyle.alignment = TA_CENTER
ChapterStyle.fontsize = 14
InitialStyle = copy.deepcopy(ChapterStyle)
InitialStyle.fontsize = 16
InitialStyle.leading = 20
PreStyle = styles["Code"]
def newPage():
Elements.append(PageBreak())
chNum = 0
def myOnDrawCB(canv,kind,label):
print('myOnDrawCB(%s)'%kind, 'Page number=', canv.getPageNumber(), 'label value=', label)
def chapter(txt, style=ChapterStyle):
global chNum
Elements.append(NextPageTemplate('OneCol'))
newPage()
chNum += 1
if _NEW_PARA or not _CALLBACK:
Elements.append(Paragraph(txt, style))
else:
Elements.append(Paragraph(('foo<onDraw name="myOnDrawCB" label="chap %d"/> '%chNum)+txt, style))
Elements.append(Spacer(0.2*inch, 0.3*inch))
if useTwoCol:
Elements.append(NextPageTemplate('TwoCol'))
def fTitle(txt,style=InitialStyle):
Elements.append(Paragraph(txt, style))
ParaStyle = copy.deepcopy(styles["Normal"])
ParaStyle.spaceBefore = 0.1*inch
if 'right' in sys.argv:
ParaStyle.alignment = TA_RIGHT
elif 'left' in sys.argv:
ParaStyle.alignment = TA_LEFT
elif 'justify' in sys.argv:
ParaStyle.alignment = TA_JUSTIFY
elif 'center' in sys.argv or 'centre' in sys.argv:
ParaStyle.alignment = TA_CENTER
else:
ParaStyle.alignment = TA_JUSTIFY
useTwoCol = 'notwocol' not in sys.argv
def spacer(inches):
Elements.append(Spacer(0.1*inch, inches*inch))
def p(txt, style=ParaStyle):
if _REDCAP:
fs, fe = '<font color="red" size="+2">', '</font>'
n = len(txt)
for i in range(n):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = (txt[:i]+(fs+txt[i]+fe))+txt[i+1:]
break
if _REDCAP>=2 and n>20:
j = i+len(fs)+len(fe)+1+int((n-1)/2)
while not ('a'<=txt[j]<='z' or 'A'<=txt[j]<='Z'): j += 1
txt = (txt[:j]+('<b><i><font size="+2" color="blue">'+txt[j]+'</font></i></b>'))+txt[j+1:]
if _REDCAP==3 and n>20:
n = len(txt)
fs = '<font color="green" size="+1">'
for i in range(n-1,-1,-1):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = txt[:i]+((fs+txt[i]+fe)+txt[i+1:])
break
Elements.append(Paragraph(txt, style))
firstPre = 1
def pre(txt, style=PreStyle):
global firstPre
if firstPre:
Elements.append(NextPageTemplate('OneCol'))
newPage()
firstPre = 0
spacer(0.1)
p = Preformatted(txt, style)
Elements.append(p)
def parseOdyssey(fn):
from time import time
E = []
t0=time()
text = open(fn,'r').read()
i0 = text.index('Book I')
endMarker = 'covenant of peace between the two contending parties.'
i1 = text.index(endMarker)+len(endMarker)
PREAMBLE=list(map(str.strip,text[0:i0].split('\n')))
L=list(map(str.strip,text[i0:i1].split('\n')))
POSTAMBLE=list(map(str.strip,text[i1:].split('\n')))
def ambleText(L):
while L and not L[0]: L.pop(0)
while L:
T=[]
while L and L[0]:
T.append(L.pop(0))
yield T
while L and not L[0]: L.pop(0)
def mainText(L):
while L:
B = L.pop(0)
while not L[0]: L.pop(0)
T=[]
while L and L[0]:
T.append(L.pop(0))
while not L[0]: L.pop(0)
P = []
while L and not (L[0].startswith('Book ') and len(L[0].split())==2):
E=[]
while L and L[0]:
E.append(L.pop(0))
P.append(E)
if L:
while not L[0]: L.pop(0)
yield B,T,P
t1 = time()
print("open(%s,'r').read() took %.4f seconds" %(fn,t1-t0))
E.append([spacer,2])
E.append([fTitle,'<font color="red">%s</font>' % Title, InitialStyle])
E.append([fTitle,'<font size="-4">by</font> <font color="green">%s</font>' % Author, InitialStyle])
for T in ambleText(PREAMBLE):
E.append([p,'\n'.join(T)])
for (B,T,P) in mainText(L):
E.append([chapter,B])
E.append([p,'<font size="+1" color="Blue"><b>%s</b></font>' % '\n'.join(T),ParaStyle])
for x in P:
E.append([p,' '.join(x)])
firstPre = 1
for T in ambleText(POSTAMBLE):
E.append([p,'\n'.join(T)])
t3 = time()
print("Parsing into memory took %.4f seconds" %(t3-t1))
del L
t4 = time()
print("Deleting list of lines took %.4f seconds" %(t4-t3))
for i in range(len(E)):
E[i][0](*E[i][1:])
t5 = time()
print("Moving into platypus took %.4f seconds" %(t5-t4))
del E
t6 = time()
print("Deleting list of actions took %.4f seconds" %(t6-t5))
go()
t7 = time()
print("saving to PDF took %.4f seconds" %(t7-t6))
print("Total run took %.4f seconds"%(t7-t0))
import hashlib
print('file digest: %s' % hashlib.md5(open('dodyssey.pdf','rb').read()).hexdigest())
def run():
for fn in ('odyssey.full.txt','odyssey.txt'):
if os.path.isfile(fn):
parseOdyssey(fn)
break
def doProf(profname,func,*args,**kwd):
import hotshot, hotshot.stats
prof = hotshot.Profile(profname)
prof.runcall(func)
prof.close()
stats = hotshot.stats.load(profname)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
if __name__=='__main__':
if '--prof' in sys.argv:
doProf('dodyssey.prof',run)
else:
run()
| [((24, 9, 24, 30), 'reportlab.lib.styles.getSampleStyleSheet', 'getSampleStyleSheet', ({}, {}), '()', False, 'from reportlab.lib.styles import getSampleStyleSheet\n'), ((64, 15, 64, 48), 'copy.deepcopy', 'copy.deepcopy', ({(64, 29, 64, 47): "styles['Heading1']"}, {}), "(styles['Heading1'])", False, 'import sys, copy, os\n'), ((67, 15, 67, 42), 'copy.deepcopy', 'copy.deepcopy', ({(67, 29, 67, 41): 'ChapterStyle'}, {}), '(ChapterStyle)', False, 'import sys, copy, os\n'), ((95, 12, 95, 43), 'copy.deepcopy', 'copy.deepcopy', ({(95, 26, 95, 42): "styles['Normal']"}, {}), "(styles['Normal'])", False, 'import sys, copy, os\n'), ((10, 12, 10, 40), 'os.environ.get', 'os.environ.get', ({(10, 27, 10, 35): '"""REDCAP"""', (10, 36, 10, 39): '"""0"""'}, {}), "('REDCAP', '0')", False, 'import sys, copy, os\n'), ((150, 7, 150, 13), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((186, 9, 186, 15), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((205, 9, 205, 15), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((208, 9, 208, 15), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((212, 9, 212, 15), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((215, 9, 215, 15), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((218, 9, 218, 15), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((233, 15, 233, 40), 'hotshot.Profile', 'hotshot.Profile', ({(233, 31, 233, 39): 'profname'}, {}), '(profname)', False, 'import hotshot, hotshot.stats\n'), ((236, 16, 236, 44), 'hotshot.stats.load', 'hotshot.stats.load', ({(236, 35, 236, 43): 'profname'}, {}), '(profname)', False, 'import hotshot, hotshot.stats\n'), ((9, 10, 9, 40), 'os.environ.get', 'os.environ.get', ({(9, 25, 9, 35): '"""NEW_PARA"""', (9, 36, 9, 39): '"""0"""'}, {}), "('NEW_PARA', '0')", False, 'import sys, copy, os\n'), ((11, 10, 11, 40), 'os.environ.get', 'os.environ.get', ({(11, 25, 11, 35): '"""CALLBACK"""', (11, 36, 11, 39): '"""0"""'}, {}), "('CALLBACK', '0')", False, 'import sys, copy, os\n'), ((15, 15, 15, 28), 'rlextra.radxml.para.Paragraph', 'PPPP', ({(15, 20, 15, 21): 's', (15, 22, 15, 27): 'style'}, {}), '(s, style)', True, 'from rlextra.radxml.para import Paragraph as PPPP\n'), ((42, 15, 42, 30), 'reportlab.pdfgen.canvas.Canvas', 'Canvas', ({(42, 22, 42, 24): 'fn'}, {}), '(fn, **kw)', False, 'from reportlab.pdfgen.canvas import Canvas\n'), ((227, 11, 227, 29), 'os.path.isfile', 'os.path.isfile', ({(227, 26, 227, 28): 'fn'}, {}), '(fn)', False, 'import sys, copy, os\n')] |
Traceabl3/GamestonkTerminal | tests/test_fred_fred_view.py | 922353cade542ce3f62701e10d816852805b9386 | """ econ/fred_view.py tests """
import unittest
from unittest import mock
from io import StringIO
import pandas as pd
# pylint: disable=unused-import
from gamestonk_terminal.econ.fred_view import get_fred_data # noqa: F401
fred_data_mock = """
,GDP
2019-01-01,21115.309
2019-04-01,21329.877
2019-07-01,21540.325
2019-10-01,21747.394
2020-01-01,21561.139
2020-04-01,19520.114
2020-07-01,21170.252
2020-10-01,21494.731
"""
class TestFredFredView(unittest.TestCase):
@mock.patch("gamestonk_terminal.econ.fred_view.Fred.get_series")
def test_get_fred_data(self, mock_get_series):
fred_data = pd.read_csv(StringIO(fred_data_mock), header=0, index_col=0)
mock_get_series.return_value = fred_data
get_fred_data(["--noplot"], "gdp")
| [((24, 5, 24, 68), 'unittest.mock.patch', 'mock.patch', ({(24, 16, 24, 67): '"""gamestonk_terminal.econ.fred_view.Fred.get_series"""'}, {}), "('gamestonk_terminal.econ.fred_view.Fred.get_series')", False, 'from unittest import mock\n'), ((30, 8, 30, 42), 'gamestonk_terminal.econ.fred_view.get_fred_data', 'get_fred_data', ({(30, 22, 30, 34): "['--noplot']", (30, 36, 30, 41): '"""gdp"""'}, {}), "(['--noplot'], 'gdp')", False, 'from gamestonk_terminal.econ.fred_view import get_fred_data\n'), ((26, 32, 26, 56), 'io.StringIO', 'StringIO', ({(26, 41, 26, 55): 'fred_data_mock'}, {}), '(fred_data_mock)', False, 'from io import StringIO\n')] |
jt6562/XX-Net | python27/1.0/lib/linux/gevent/pool.py | 7b78e4820a3c78c3ba3e75b3917129d17f00e9fc | # Copyright (c) 2009-2010 Denis Bilenko. See LICENSE for details.
"""Managing greenlets in a group.
The :class:`Group` class in this module abstracts a group of running greenlets.
When a greenlet dies, it's automatically removed from the group.
The :class:`Pool`, which is a subclass of :class:`Group`, provides a way to limit
concurrency: its :meth:`spawn <Pool.spawn>` method blocks if the number of
greenlets in the pool has already reached the limit, until there is a free slot.
"""
from gevent.hub import GreenletExit, getcurrent
from gevent.greenlet import joinall, Greenlet
from gevent.timeout import Timeout
from gevent.event import Event
from gevent.coros import Semaphore, DummySemaphore
__all__ = ['Group', 'Pool']
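# Editor's illustrative sketch (not part of the original module): minimal usage
# of Pool to limit concurrency, as described in the module docstring above.
# `fetch` and `urls` are hypothetical names used only for this example.
#
#     pool = Pool(3)                 # at most three greenlets run at once
#     for url in urls:
#         pool.spawn(fetch, url)     # blocks while the pool is full
#     pool.join()                    # wait for all spawned greenlets to finish
#
# A plain Group behaves the same way except that spawn() never blocks.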
class Group(object):
"""Maintain a group of greenlets that are still running.
Links to each item and removes it upon notification.
"""
greenlet_class = Greenlet
def __init__(self, *args):
assert len(args) <= 1, args
self.greenlets = set(*args)
if args:
for greenlet in args[0]:
greenlet.rawlink(self.discard)
# each item we kill we place in dying, to avoid killing the same greenlet twice
self.dying = set()
self._empty_event = Event()
self._empty_event.set()
def __repr__(self):
try:
classname = self.__class__.__name__
except AttributeError:
classname = 'Group' # XXX check if 2.4 really uses this line
return '<%s at %s %s>' % (classname, hex(id(self)), self.greenlets)
def __len__(self):
return len(self.greenlets)
def __contains__(self, item):
return item in self.greenlets
def __iter__(self):
return iter(self.greenlets)
def add(self, greenlet):
greenlet.rawlink(self.discard)
self.greenlets.add(greenlet)
self._empty_event.clear()
def discard(self, greenlet):
self.greenlets.discard(greenlet)
self.dying.discard(greenlet)
if not self.greenlets:
self._empty_event.set()
def start(self, greenlet):
self.add(greenlet)
greenlet.start()
def spawn(self, *args, **kwargs):
add = self.add
greenlet = self.greenlet_class.spawn(*args, **kwargs)
add(greenlet)
return greenlet
def spawn_link(self, *args, **kwargs):
greenlet = self.spawn(*args, **kwargs)
greenlet.link()
return greenlet
def spawn_link_value(self, *args, **kwargs):
greenlet = self.spawn(*args, **kwargs)
greenlet.link_value()
return greenlet
def spawn_link_exception(self, *args, **kwargs):
greenlet = self.spawn(*args, **kwargs)
greenlet.link_exception()
return greenlet
# def close(self):
# """Prevents any more tasks from being submitted to the pool"""
# self.add = RaiseException("This %s has been closed" % self.__class__.__name__)
def join(self, timeout=None, raise_error=False):
if raise_error:
greenlets = self.greenlets.copy()
self._empty_event.wait(timeout=timeout)
for greenlet in greenlets:
if greenlet.exception is not None:
raise greenlet.exception
else:
self._empty_event.wait(timeout=timeout)
def kill(self, exception=GreenletExit, block=True, timeout=None):
timer = Timeout.start_new(timeout)
try:
try:
while self.greenlets:
for greenlet in list(self.greenlets):
if greenlet not in self.dying:
greenlet.kill(exception, block=False)
self.dying.add(greenlet)
if not block:
break
joinall(self.greenlets)
except Timeout, ex:
if ex is not timer:
raise
finally:
timer.cancel()
def killone(self, greenlet, exception=GreenletExit, block=True, timeout=None):
if greenlet not in self.dying and greenlet in self.greenlets:
greenlet.kill(exception, block=False)
self.dying.add(greenlet)
if block:
greenlet.join(timeout)
def apply(self, func, args=None, kwds=None):
"""Equivalent of the apply() builtin function. It blocks till the result is ready."""
if args is None:
args = ()
if kwds is None:
kwds = {}
if getcurrent() in self:
return func(*args, **kwds)
else:
return self.spawn(func, *args, **kwds).get()
def apply_cb(self, func, args=None, kwds=None, callback=None):
result = self.apply(func, args, kwds)
if callback is not None:
Greenlet.spawn(callback, result)
return result
def apply_async(self, func, args=None, kwds=None, callback=None):
"""A variant of the apply() method which returns a Greenlet object.
If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready
callback is applied to it (unless the call failed)."""
if args is None:
args = ()
if kwds is None:
kwds = {}
if self.full():
# cannot call spawn() directly because it will block
return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
else:
greenlet = self.spawn(func, *args, **kwds)
if callback is not None:
greenlet.link(pass_value(callback))
return greenlet
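    # Editor's illustrative sketch (not part of the original module): apply()
    # blocks until the result is ready, while apply_async() returns a Greenlet
    # immediately and fires the callback with the value when it completes.
    # `compute` and `on_result` are hypothetical callables for this example.
    #
    #     value = group.apply(compute, args=(1, 2))                  # blocking
    #     g = group.apply_async(compute, args=(3, 4), callback=on_result)
    #     g.join()                                                   # optional wait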
def map(self, func, iterable):
greenlets = [self.spawn(func, item) for item in iterable]
return [greenlet.get() for greenlet in greenlets]
def map_cb(self, func, iterable, callback=None):
result = self.map(func, iterable)
if callback is not None:
callback(result)
return result
def map_async(self, func, iterable, callback=None):
"""
A variant of the map() method which returns a Greenlet object.
If callback is specified then it should be a callable which accepts a
single argument.
"""
return Greenlet.spawn(self.map_cb, func, iterable, callback)
def imap(self, func, iterable):
"""An equivalent of itertools.imap()
**TODO**: Fix this.
"""
return iter(self.map(func, iterable))
def imap_unordered(self, func, iterable):
"""The same as imap() except that the ordering of the results from the
returned iterator should be considered in arbitrary order."""
return IMapUnordered.spawn(self.spawn, func, iterable)
def full(self):
return False
def wait_available(self):
pass
class IMapUnordered(Greenlet):
def __init__(self, spawn, func, iterable):
from gevent.queue import Queue
Greenlet.__init__(self)
self.spawn = spawn
self.func = func
self.iterable = iterable
self.queue = Queue()
self.count = 0
def __iter__(self):
return self.queue
def _run(self):
try:
func = self.func
for item in self.iterable:
self.count += 1
self.spawn(func, item).rawlink(self._on_result)
finally:
self.__dict__.pop('spawn', None)
self.__dict__.pop('func', None)
self.__dict__.pop('iterable', None)
def _on_result(self, greenlet):
self.count -= 1
if greenlet.successful():
self.queue.put(greenlet.value)
if self.ready() and self.count <= 0:
self.queue.put(StopIteration)
def GreenletSet(*args, **kwargs):
import warnings
warnings.warn("gevent.pool.GreenletSet was renamed to gevent.pool.Group since version 0.13.0", DeprecationWarning, stacklevel=2)
return Group(*args, **kwargs)
class Pool(Group):
def __init__(self, size=None, greenlet_class=None):
if size is not None and size < 1:
raise ValueError('Invalid size for pool (positive integer or None required): %r' % (size, ))
Group.__init__(self)
self.size = size
if greenlet_class is not None:
self.greenlet_class = greenlet_class
if size is None:
self._semaphore = DummySemaphore()
else:
self._semaphore = Semaphore(size)
def wait_available(self):
self._semaphore.wait()
def full(self):
return self.free_count() <= 0
def free_count(self):
if self.size is None:
return 1
return max(0, self.size - len(self))
def start(self, greenlet):
self._semaphore.acquire()
try:
self.add(greenlet)
except:
self._semaphore.release()
raise
greenlet.start()
def spawn(self, *args, **kwargs):
self._semaphore.acquire()
try:
greenlet = self.greenlet_class.spawn(*args, **kwargs)
self.add(greenlet)
except:
self._semaphore.release()
raise
return greenlet
def spawn_link(self, *args, **kwargs):
self._semaphore.acquire()
try:
greenlet = self.greenlet_class.spawn_link(*args, **kwargs)
self.add(greenlet)
except:
self._semaphore.release()
raise
return greenlet
def spawn_link_value(self, *args, **kwargs):
self._semaphore.acquire()
try:
greenlet = self.greenlet_class.spawn_link_value(*args, **kwargs)
self.add(greenlet)
except:
self._semaphore.release()
raise
return greenlet
def spawn_link_exception(self, *args, **kwargs):
self._semaphore.acquire()
try:
greenlet = self.greenlet_class.spawn_link_exception(*args, **kwargs)
self.add(greenlet)
except:
self._semaphore.release()
raise
return greenlet
def discard(self, greenlet):
Group.discard(self, greenlet)
self._semaphore.release()
def get_values(greenlets):
joinall(greenlets)
return [x.value for x in greenlets]
class pass_value(object):
__slots__ = ['callback']
def __init__(self, callback):
self.callback = callback
def __call__(self, source):
if source.successful():
self.callback(source.value)
def __hash__(self):
return hash(self.callback)
def __eq__(self, other):
return self.callback == getattr(other, 'callback', other)
def __str__(self):
return str(self.callback)
def __repr__(self):
return repr(self.callback)
def __getattr__(self, item):
assert item != 'callback'
return getattr(self.callback, item)
| [] |
anshumandutt/AreCELearnedYet | lecarb/estimator/lw/lw_tree.py | e2286c3621dea8e4961057b6197c1e14e75aea5a | import time
import logging
from typing import Dict, Any, Tuple
import pickle
import numpy as np
import xgboost as xgb
from .common import load_lw_dataset, encode_query, decode_label
from ..postgres import Postgres
from ..estimator import Estimator
from ..utils import evaluate, run_test
from ...dataset.dataset import load_table
from ...workload.workload import Query
from ...constants import MODEL_ROOT, NUM_THREADS, PKL_PROTO
L = logging.getLogger(__name__)
class Args:
def __init__(self, **kwargs):
self.trees = 16
self.bins = 200
self.train_num = 10000
# overwrite parameters from user
self.__dict__.update(kwargs)
def train_lw_tree(seed, dataset, version, workload, params, sizelimit):
np.random.seed(seed)
# convert parameter dict of lw(nn)
L.info(f"params: {params}")
args = Args(**params)
valid_num = args.train_num // 10
table = load_table(dataset, version)
dataset = load_lw_dataset(table, workload, seed, args.bins)
train_X, train_y, _ = dataset['train']
valid_X, valid_y, valid_gt = dataset['valid']
# Train model
model_path = MODEL_ROOT / table.dataset
model_path.mkdir(parents=True, exist_ok=True)
model_file = model_path / f"{table.version}_{workload}-lwxgb_tr{args.trees}_bin{args.bins}_{args.train_num//1000}k-{seed}.pkl"
L.info(f"Start training...")
start_stmp = time.time()
model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=args.trees, random_state=seed, n_jobs=NUM_THREADS)
model.fit(train_X[:args.train_num], train_y[:args.train_num], eval_set=[(valid_X[:valid_num], valid_y[:valid_num])])
dur_min = (time.time() - start_stmp) / 60
L.info(f"Finish training, time since start: {dur_min:.4f} mins")
L.info(f"Run on valid set...")
preds = np.maximum(np.round(decode_label(model.predict(valid_X[:valid_num]))), 0.0)
gts = valid_gt[:valid_num]
L.info("Q-Error on validation set:")
_, metrics = evaluate(preds, gts)
state = {
'seed': seed,
'args': args,
'device': 'cpu',
'threads': NUM_THREADS,
'dataset': table.dataset,
'version': table.version,
'workload': workload,
'model': model,
'train_time': dur_min,
'valid_error': {workload: metrics}
# 'model_size': model_size,
}
with open(model_file, 'wb') as f:
pickle.dump(state, f, protocol=PKL_PROTO)
L.info(f'All finished! Time spent since training start: {(time.time()-start_stmp)/60:.2f} mins')
L.info(f"Model saved to {model_file}")
class LWTree(Estimator):
def __init__(self, model, model_name, pg_est, table):
super(LWTree, self).__init__(table=table, model=model_name)
self.model = model
self.pg_est = pg_est
def query(self, query):
if isinstance(query, Query):
query = encode_query(self.table, query, self.pg_est)
return self.query_vector(np.expand_dims(query, axis=0))
def query_vector(self, vec):
start_stmp = time.time()
pred = self.model.predict(vec).item()
dur_ms = (time.time() - start_stmp) * 1e3
return np.maximum(np.round(decode_label(pred)), 0.0), dur_ms
def load_lw_tree(dataset: str, model_name: str) -> Tuple[Estimator, Dict[str, Any]]:
model_file = MODEL_ROOT / dataset / f"{model_name}.pkl"
L.info(f"load model from {model_file} ...")
with open(model_file, 'rb') as f:
state = pickle.load(f)
# load model
args = state['args']
model = state['model']
table = load_table(dataset, state['version'])
pg_est = Postgres(table, args.bins, state['seed'])
estimator = LWTree(model, model_name, pg_est, table)
return estimator, state
def test_lw_tree(dataset: str, version: str, workload: str, params: Dict[str, Any], overwrite: bool) -> None:
"""
params:
model: model file name
use_cache: load processed vectors directly instead of build from queries
"""
# uniform thread number
model_file = MODEL_ROOT / dataset / f"{params['model']}.pkl"
L.info(f"Load model from {model_file} ...")
with open(model_file, 'rb') as f:
state = pickle.load(f)
    # load corresponding version of table
table = load_table(dataset, state['version'])
# load model
args = state['args']
model = state['model']
pg_est = Postgres(table, args.bins, state['seed'])
estimator = LWTree(model, params['model'], pg_est, table)
L.info(f"Load and built lw(tree) estimator: {estimator}")
if params['use_cache']:
# test table might has different version with train
test_table = load_table(dataset, version)
lw_dataset = load_lw_dataset(test_table, workload, state['seed'], args.bins)
X, _, gt = lw_dataset['test']
run_test(dataset, version, workload, estimator, overwrite, lw_vec=(X, gt))
else:
run_test(dataset, version, workload, estimator, overwrite)
| [((17, 4, 17, 31), 'logging.getLogger', 'logging.getLogger', ({(17, 22, 17, 30): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((29, 4, 29, 24), 'numpy.random.seed', 'np.random.seed', ({(29, 19, 29, 23): 'seed'}, {}), '(seed)', True, 'import numpy as np\n'), ((47, 17, 47, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((48, 12, 48, 122), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', (), '', True, 'import xgboost as xgb\n'), ((73, 8, 73, 49), 'pickle.dump', 'pickle.dump', (), '', False, 'import pickle\n'), ((90, 21, 90, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((100, 16, 100, 30), 'pickle.load', 'pickle.load', ({(100, 28, 100, 29): 'f'}, {}), '(f)', False, 'import pickle\n'), ((121, 16, 121, 30), 'pickle.load', 'pickle.load', ({(121, 28, 121, 29): 'f'}, {}), '(f)', False, 'import pickle\n'), ((50, 15, 50, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((87, 33, 87, 62), 'numpy.expand_dims', 'np.expand_dims', (), '', True, 'import numpy as np\n'), ((92, 18, 92, 29), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((75, 13, 75, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
yamasampo/fsim | fsim/utils.py | 30100789b03981dd9ea11c5c2e17a3c53910f724 |
import os
import configparser
from warnings import warn
def read_control_file(control_file):
# Initialize ConfigParser object
config = configparser.ConfigParser(
strict=True,
comment_prefixes=('/*', ';', '#'),
inline_comment_prefixes=('/*', ';', '#')
)
# Parse control file
paths = config.read(control_file)
# Check number of read control files.
if len(paths) == 0:
raise FileNotFoundError(
f'Specified control file, {control_file}, is not found.')
elif len(paths) > 1:
raise TypeError(f'Iterable {type(control_file)} is given as a control '\
'file. Only one control file is supported.')
# Check sections. Only 'REQUIRED' and 'OPTIONAL' sections will be used.
assert 'REQUIRED' in config.sections(), \
f'REQUIRED section is not found in {control_file}.'
expected_sections = ['REQUIRED', 'OPTIONAL']
not_expected_sections = [
s for s in config.sections() if s not in expected_sections]
if len(not_expected_sections) >= 1:
msg = f'Unexpected sections, {", ".join(not_expected_sections)}, '\
'were found. These are not used in '\
            'the analysis. If you wish to include them in the analysis, please '\
            'specify them in "REQUIRED" or "OPTIONAL" sections.'
warn(msg)
converters_d = {
'pop_size': int,
'ns': float,
'init_mut_num': int,
'generation_num': int,
'total_site_num': int,
'var_site_num': int,
'poly_site_num': int,
'fix_site_num': int,
'output_only_fixation': lambda s: True if s == 'True' else (False if s == 'False' else -9)
}
flattened = [
(opt, converters_d[opt](v))
if opt in converters_d.keys() else (opt, v)
for s in expected_sections
for opt, v in config[s].items()
]
return dict(flattened)
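# Editor's illustrative sketch (not part of the original module): a minimal
# control file that read_control_file() would accept. Option values here are
# placeholders; options listed in converters_d are cast to int/float/bool.
#
#     [REQUIRED]
#     pop_size = 1000
#     ns = 0.001
#     init_mut_num = 1
#     generation_num = 5000
#
#     [OPTIONAL]
#     output_only_fixation = True
#
# Parsing such a file with read_control_file('control.ini') would return a
# dict like {'pop_size': 1000, 'ns': 0.001, 'init_mut_num': 1,
# 'generation_num': 5000, 'output_only_fixation': True}.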
def write_info_to_file(file_handle, separator, *args, **kw_args):
""" Write arguments or keyword arguments to a file. Values will be
separated by a given separator.
"""
output_lines = []
if len(args) > 0:
output_lines.append(separator.join(args))
if len(kw_args) > 0:
for k, v in kw_args.items():
output_lines.append(f'{k}{separator}{v}')
print('\n'.join(output_lines), file=file_handle)
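# Editor's illustrative sketch (not part of the original module), hypothetical
# usage of write_info_to_file:
#
#     with open('run.log', 'w') as fh:
#         write_info_to_file(fh, ' = ', pop_size=1000, ns=0.001)
#     # run.log now contains the lines "pop_size = 1000" and "ns = 0.001"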
def write_settings(file_handle, **kw_args):
print('[Setting]', file=file_handle)
write_info_to_file(file_handle, separator=' = ', **kw_args)
| [((8, 13, 12, 5), 'configparser.ConfigParser', 'configparser.ConfigParser', (), '', False, 'import configparser\n'), ((37, 8, 37, 17), 'warnings.warn', 'warn', ({(37, 13, 37, 16): 'msg'}, {}), '(msg)', False, 'from warnings import warn\n')] |
mahgadalla/pymor | src/pymortests/function.py | ee2806b4c93748e716294c42454d611415da7b5e | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import pytest
from pymor.core.pickle import dumps, loads
from pymor.functions.basic import ConstantFunction, GenericFunction
from pymortests.fixtures.function import function, picklable_function, function_argument
from pymortests.fixtures.parameter import parameters_of_type
from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function
# monkey np.testing.assert_allclose to behave the same as np.allclose
# for some reason, the default atol of np.testing.assert_allclose is 0
# while it is 1e-8 for np.allclose
real_assert_allclose = np.testing.assert_allclose
def monkey_allclose(a, b, rtol=1.e-5, atol=1.e-8):
real_assert_allclose(a, b, rtol=rtol, atol=atol)
np.testing.assert_allclose = monkey_allclose
def test_evaluate(function):
f = function
mus = parameters_of_type(f.parameter_type, 4711)
for count in [0, 1, 5, (0, 1), (2, 2, 2)]:
arg = function_argument(f, count, 454)
result = f.evaluate(arg, next(mus))
assert result.shape == arg.shape[:-1] + f.shape_range
def test_lincomb_function():
for steps in (1, 10):
x = np.linspace(0, 1, num=steps)
zero = ConstantFunction(0.0, dim_domain=steps)
for zero in (ConstantFunction(0.0, dim_domain=steps),
GenericFunction(lambda X: np.zeros(X.shape[:-1]), dim_domain=steps)):
for one in (ConstantFunction(1.0, dim_domain=steps),
GenericFunction(lambda X: np.ones(X.shape[:-1]), dim_domain=steps), 1.0):
add = (zero + one) + 0
sub = (zero - one) + np.zeros(())
neg = - zero
assert np.allclose(sub(x), [-1])
assert np.allclose(add(x), [1.0])
assert np.allclose(neg(x), [0.0])
(repr(add), str(add), repr(one), str(one)) # just to cover the respective special funcs too
mul = neg * 1.
assert np.allclose(mul(x), [0.0])
with pytest.raises(AssertionError):
zero + ConstantFunction(dim_domain=steps + 1)
with pytest.raises(AssertionError):
zero * ConstantFunction(dim_domain=steps)
with pytest.raises(AssertionError):
ConstantFunction(dim_domain=0)
def test_pickle(function):
assert_picklable(function)
def test_pickle_without_dumps_function(picklable_function):
assert_picklable_without_dumps_function(picklable_function)
def test_pickle_by_evaluation(function):
f = function
f2 = loads(dumps(f))
mus = parameters_of_type(f.parameter_type, 47)
for arg in function_argument(f, 10, 42):
mu = next(mus)
assert np.all(f.evaluate(arg, mu) == f2.evaluate(arg, mu))
| [((29, 10, 29, 52), 'pymortests.fixtures.parameter.parameters_of_type', 'parameters_of_type', ({(29, 29, 29, 45): 'f.parameter_type', (29, 47, 29, 51): '4711'}, {}), '(f.parameter_type, 4711)', False, 'from pymortests.fixtures.parameter import parameters_of_type\n'), ((62, 4, 62, 30), 'pymortests.pickling.assert_picklable', 'assert_picklable', ({(62, 21, 62, 29): 'function'}, {}), '(function)', False, 'from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function\n'), ((66, 4, 66, 63), 'pymortests.pickling.assert_picklable_without_dumps_function', 'assert_picklable_without_dumps_function', ({(66, 44, 66, 62): 'picklable_function'}, {}), '(picklable_function)', False, 'from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function\n'), ((72, 10, 72, 50), 'pymortests.fixtures.parameter.parameters_of_type', 'parameters_of_type', ({(72, 29, 72, 45): 'f.parameter_type', (72, 47, 72, 49): '47'}, {}), '(f.parameter_type, 47)', False, 'from pymortests.fixtures.parameter import parameters_of_type\n'), ((73, 15, 73, 43), 'pymortests.fixtures.function.function_argument', 'function_argument', ({(73, 33, 73, 34): 'f', (73, 36, 73, 38): '(10)', (73, 40, 73, 42): '(42)'}, {}), '(f, 10, 42)', False, 'from pymortests.fixtures.function import function, picklable_function, function_argument\n'), ((31, 14, 31, 46), 'pymortests.fixtures.function.function_argument', 'function_argument', ({(31, 32, 31, 33): 'f', (31, 35, 31, 40): 'count', (31, 42, 31, 45): '454'}, {}), '(f, count, 454)', False, 'from pymortests.fixtures.function import function, picklable_function, function_argument\n'), ((38, 12, 38, 40), 'numpy.linspace', 'np.linspace', (), '', True, 'import numpy as np\n'), ((39, 15, 39, 54), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (), '', False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((57, 9, 57, 38), 'pytest.raises', 'pytest.raises', ({(57, 23, 57, 37): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((58, 8, 58, 38), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (), '', False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((71, 15, 71, 23), 'pymor.core.pickle.dumps', 'dumps', ({(71, 21, 71, 22): 'f'}, {}), '(f)', False, 'from pymor.core.pickle import dumps, loads\n'), ((40, 21, 40, 60), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (), '', False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((53, 13, 53, 42), 'pytest.raises', 'pytest.raises', ({(53, 27, 53, 41): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((55, 13, 55, 42), 'pytest.raises', 'pytest.raises', ({(55, 27, 55, 41): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((42, 24, 42, 63), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (), '', False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((54, 19, 54, 57), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (), '', False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((56, 19, 56, 53), 'pymor.functions.basic.ConstantFunction', 'ConstantFunction', (), '', False, 'from pymor.functions.basic import ConstantFunction, GenericFunction\n'), ((41, 47, 41, 69), 'numpy.zeros', 'np.zeros', ({(41, 56, 41, 68): 'X.shape[:-1]'}, {}), '(X.shape[:-1])', True, 'import numpy as np\n'), ((45, 37, 45, 49), 'numpy.zeros', 'np.zeros', ({(45, 46, 45, 48): '()'}, {}), 
'(())', True, 'import numpy as np\n'), ((43, 50, 43, 71), 'numpy.ones', 'np.ones', ({(43, 58, 43, 70): 'X.shape[:-1]'}, {}), '(X.shape[:-1])', True, 'import numpy as np\n')] |
CarberZ/social-media-mining | Code/userIDCrawler.py | 41aee64a41244a0692987b75b30dedbd0552be49 |
'''
step 1
get the userID and their locations
put them all into a database
'''
from bs4 import BeautifulSoup
import urllib
import sqlite3
from selenium import webdriver
import time
import re
from urllib import request
import random
import pickle
import os
import pytesseract
url_dog = "https://www.douban.com/group/lovelydog/members?start="
url_cat = "https://www.douban.com/group/cat/members?start="
'''
cat = 1 ~ 336770
dog = 1 ~ 156240
'''
class getInfo(object):
memberList = []
type = None
url = None
memberNumber = 0
conn = None
cursor = None
def __init__(self, type):
getInfo.type = type
if type == "cat":
getInfo.url = url_cat
getInfo.memberNumber = 336770
else:
getInfo.url = url_dog
getInfo.memberNumber = 156240
dbName = "CDPeopleDB.sqlite"
        # initiate the start point if no checkpoint file exists yet
        if not os.path.isfile('stopPoint.pickle'):
            with open('stopPoint.pickle', 'wb') as file:
                pickle.dump(1, file)
conn = sqlite3.connect(dbName)
getInfo.conn = conn
getInfo.cursor = getInfo.conn.cursor()
# if getInfo.type == 'dog':
# getInfo.cursor.execute("drop table if exists DogPeople")
# getInfo.cursor.execute("create table DogPeople(id varchar(48), location varchar(48))")
# else:
# getInfo.cursor.execute("drop table if exists CatPeople")
# getInfo.cursor.execute("create table CatPeople(id varchar(48), location varchar(48))")
def sliceContent(self, pageContent):
pageContent = re.sub(r"<ul>(.*)</ul>", "\\1", pageContent.replace("\n", ""))
# print(pageContent)
memberList = re.sub(r'<li class=""> (.*?) </li>', "\\1mark", pageContent.strip())
memberList = re.split(r"mark", memberList)
inforContent = re.findall(r'<div class="name">(.*?)</div>', memberList[35])
for member in memberList:
if member.strip() != '':
inforContent = re.findall(r'<div class="name">(.*?)</div>', member)
if len(inforContent)!= 0:
inforContent = inforContent[0].strip()
                    identity = re.findall(r'https://www.douban.com/people/(.*?)/', inforContent)
if len(identity)!=0:
id = identity[0]
location = re.findall(r'<span class="pl">\((.*?)\)</span>', inforContent)
if len(location) != 0:
coordinate = str(location[0])
else:
coordinate = 'Unknown'
else:
continue
if getInfo.type == 'dog':
getInfo.cursor.execute("insert into DogPeople values(?, ?)", (id, coordinate))
else:
getInfo.cursor.execute("insert into CatPeople values(?, ?)", (id, coordinate))
getInfo.conn.commit()
def crawler(self):
opener = urllib.request.build_opener(urllib.request.HTTPSHandler)
header = ("User-Agent",
" Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
opener.addheaders = [header]
driver = webdriver.Chrome()
driver.get(getInfo.url)
time.sleep(20)
        # load the last stored position so crawling can resume if something went wrong
with open('stopPoint.pickle', 'rb') as file:
startPoint = pickle.load(file)
        # use the stored record as the start position
for i in range(startPoint, getInfo.memberNumber, 35):
driver.get(getInfo.url+str(i))
page = driver.page_source
soup = BeautifulSoup(page, "html5lib")
print(i)
with open('stopPoint.pickle', 'wb') as file:
pickle.dump(i, file)
memberList = soup.find('div', {'class': 'member-list'}).ul
content = str(memberList)
getInfo.sliceContent(self, pageContent=content)
time.sleep(2+random.random())
# info_dog = getInfo("dog")
# info_dog.crawler()
info_cat = getInfo("cat")
info_cat.crawler()
'''
create table CatPeople
as
select distinct *
from CatPeople_backup
WHERE not location GLOB '*[A-Za-z]*';
pre-processing to delete locations out of China
'''
| [((58, 15, 58, 38), 'sqlite3.connect', 'sqlite3.connect', ({(58, 31, 58, 37): 'dbName'}, {}), '(dbName)', False, 'import sqlite3\n'), ((76, 21, 76, 50), 're.split', 're.split', ({(76, 30, 76, 37): '"""mark"""', (76, 39, 76, 49): 'memberList'}, {}), "('mark', memberList)", False, 'import re\n'), ((77, 23, 77, 83), 're.findall', 're.findall', ({(77, 34, 77, 66): '"""<div class="name">(.*?)</div>"""', (77, 68, 77, 82): 'memberList[35]'}, {}), '(\'<div class="name">(.*?)</div>\', memberList[35])', False, 'import re\n'), ((106, 17, 106, 73), 'urllib.request.build_opener', 'urllib.request.build_opener', ({(106, 45, 106, 72): 'urllib.request.HTTPSHandler'}, {}), '(urllib.request.HTTPSHandler)', False, 'import urllib\n'), ((110, 17, 110, 35), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ({}, {}), '()', False, 'from selenium import webdriver\n'), ((112, 8, 112, 22), 'time.sleep', 'time.sleep', ({(112, 19, 112, 21): '(20)'}, {}), '(20)', False, 'import time\n'), ((54, 15, 54, 49), 'os.path.isfile', 'os.path.isfile', ({(54, 30, 54, 48): '"""stopPoint.pickle"""'}, {}), "('stopPoint.pickle')", False, 'import os\n'), ((116, 25, 116, 42), 'pickle.load', 'pickle.load', ({(116, 37, 116, 41): 'file'}, {}), '(file)', False, 'import pickle\n'), ((122, 19, 122, 50), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(122, 33, 122, 37): 'page', (122, 39, 122, 49): '"""html5lib"""'}, {}), "(page, 'html5lib')", False, 'from bs4 import BeautifulSoup\n'), ((56, 16, 56, 36), 'pickle.dump', 'pickle.dump', ({(56, 28, 56, 29): '(1)', (56, 31, 56, 35): 'file'}, {}), '(1, file)', False, 'import pickle\n'), ((80, 31, 80, 83), 're.findall', 're.findall', ({(80, 42, 80, 74): '"""<div class="name">(.*?)</div>"""', (80, 76, 80, 82): 'member'}, {}), '(\'<div class="name">(.*?)</div>\', member)', False, 'import re\n'), ((125, 16, 125, 36), 'pickle.dump', 'pickle.dump', ({(125, 28, 125, 29): 'i', (125, 31, 125, 35): 'file'}, {}), '(i, file)', False, 'import pickle\n'), ((129, 25, 129, 40), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((83, 31, 83, 96), 're.findall', 're.findall', ({(83, 42, 83, 81): '"""https://www.douban.com/people/(.*?)/"""', (83, 83, 83, 95): 'inforContent'}, {}), "('https://www.douban.com/people/(.*?)/', inforContent)", False, 'import re\n'), ((86, 35, 86, 97), 're.findall', 're.findall', ({(86, 46, 86, 82): '"""<span class="pl">\\\\((.*?)\\\\)</span>"""', (86, 84, 86, 96): 'inforContent'}, {}), '(\'<span class="pl">\\\\((.*?)\\\\)</span>\', inforContent)', False, 'import re\n')] |
saarkatz/guppy-struct | src/stoat/core/structure/__init__.py | b9099353312c365cfd788dbd2d168a9c844765be | from .structure import Structure
| [] |
iminders/TradeBaselines | tbase/network/polices_test.py | 26eb87f2bcd5f6ff479149219b38b17002be6a40 | import unittest
import numpy as np
from tbase.common.cmd_util import set_global_seeds
from tbase.network.polices import RandomPolicy
class TestPolices(unittest.TestCase):
@classmethod
    def setUpClass(cls):
set_global_seeds(0)
def test_random_policy(self):
policy = RandomPolicy(2)
# action 1
actual = policy.select_action([])
expected = [1.0, -0.2534131770209437]
self.assertEqual(expected, list(actual.astype(np.float)))
# action 2
actual = policy.select_action([])
expected = [-1.0, 0.8324962832376306]
self.assertEqual(expected, list(actual.astype(np.float)))
if __name__ == '__main__':
unittest.main()
| [((27, 4, 27, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((12, 8, 12, 27), 'tbase.common.cmd_util.set_global_seeds', 'set_global_seeds', ({(12, 25, 12, 26): '(0)'}, {}), '(0)', False, 'from tbase.common.cmd_util import set_global_seeds\n'), ((15, 17, 15, 32), 'tbase.network.polices.RandomPolicy', 'RandomPolicy', ({(15, 30, 15, 31): '2'}, {}), '(2)', False, 'from tbase.network.polices import RandomPolicy\n')] |
knikolla/keystone | keystone/tests/unit/core.py | 50f0a50cf4d52d3f61b64713bd4faa7a4626ae53 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import base64
import contextlib
import datetime
import functools
import hashlib
import json
import ldap
import os
import shutil
import socket
import sys
import uuid
import warnings
import fixtures
import flask
from flask import testing as flask_testing
import http.client
from oslo_config import fixture as config_fixture
from oslo_context import context as oslo_context
from oslo_context import fixture as oslo_ctx_fixture
from oslo_log import fixture as log_fixture
from oslo_log import log
from oslo_utils import timeutils
from sqlalchemy import exc
import testtools
from testtools import testcase
import keystone.api
from keystone.common import context
from keystone.common import json_home
from keystone.common import provider_api
from keystone.common import sql
import keystone.conf
from keystone import exception
from keystone.identity.backends.ldap import common as ks_ldap
from keystone import notifications
from keystone.resource.backends import base as resource_base
from keystone.server.flask import application as flask_app
from keystone.server.flask import core as keystone_flask
from keystone.tests.unit import ksfixtures
keystone.conf.configure()
keystone.conf.set_config_defaults()
PID = str(os.getpid())
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..'))
VENDOR = os.path.join(ROOTDIR, 'vendor')
ETCDIR = os.path.join(ROOTDIR, 'etc')
def _calc_tmpdir():
env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR')
if not env_val:
return os.path.join(TESTSDIR, 'tmp', PID)
return os.path.join(env_val, PID)
TMPDIR = _calc_tmpdir()
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
log.register_options(CONF)
IN_MEM_DB_CONN_STRING = 'sqlite://'
# Strictly matches ISO 8601 timestamps with subsecond precision like:
# 2016-06-28T20:48:56.000000Z
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
TIME_FORMAT_REGEX = r'^\d{4}-[0-1]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d{6}Z$'
exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
os.makedirs(TMPDIR)
atexit.register(shutil.rmtree, TMPDIR)
class dirs(object):
@staticmethod
def root(*p):
return os.path.join(ROOTDIR, *p)
@staticmethod
def etc(*p):
return os.path.join(ETCDIR, *p)
@staticmethod
def tests(*p):
return os.path.join(TESTSDIR, *p)
@staticmethod
def tmp(*p):
return os.path.join(TMPDIR, *p)
@staticmethod
def tests_conf(*p):
return os.path.join(TESTCONF, *p)
@atexit.register
def remove_test_databases():
db = dirs.tmp('test.db')
if os.path.exists(db):
os.unlink(db)
pristine = dirs.tmp('test.db.pristine')
if os.path.exists(pristine):
os.unlink(pristine)
def skip_if_cache_disabled(*sections):
"""Skip a test if caching is disabled, this is a decorator.
Caching can be disabled either globally or for a specific section.
In the code fragment::
@skip_if_cache_is_disabled('assignment', 'token')
def test_method(*args):
...
The method test_method would be skipped if caching is disabled globally via
the `enabled` option in the `cache` section of the configuration or if
the `caching` option is set to false in either `assignment` or `token`
sections of the configuration. This decorator can be used with no
arguments to only check global caching.
If a specified configuration section does not define the `caching` option,
    this decorator treats caching as enabled if the `enabled` option in the `cache`
section of the configuration is true.
"""
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.cache.enabled:
raise testcase.TestSkipped('Cache globally disabled.')
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if not getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching disabled.' % s)
return f(*args, **kwargs)
return inner
return wrapper
def skip_if_cache_is_enabled(*sections):
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if CONF.cache.enabled:
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching enabled.' %
s)
return f(*args, **kwargs)
return inner
return wrapper
def skip_if_no_multiple_domains_support(f):
"""Decorator to skip tests for identity drivers limited to one domain."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_obj = args[0]
if not test_obj.identity_api.multiple_domains_supported:
raise testcase.TestSkipped('No multiple domains support')
return f(*args, **kwargs)
return wrapper
class UnexpectedExit(Exception):
pass
def new_region_ref(parent_region_id=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'parent_region_id': parent_region_id}
ref.update(kwargs)
return ref
def new_service_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
NEEDS_REGION_ID = object()
def new_endpoint_ref(service_id, interface='public',
region_id=NEEDS_REGION_ID, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'interface': interface,
'service_id': service_id,
'url': 'https://' + uuid.uuid4().hex + '.com',
}
if region_id is NEEDS_REGION_ID:
ref['region_id'] = uuid.uuid4().hex
elif region_id is None and kwargs.get('region') is not None:
# pre-3.2 form endpoints are not supported by this function
raise NotImplementedError("use new_endpoint_ref_with_region")
else:
ref['region_id'] = region_id
ref.update(kwargs)
return ref
def new_endpoint_group_ref(filters, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'filters': filters,
'name': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_endpoint_ref_with_region(service_id, region, interface='public',
**kwargs):
"""Define an endpoint_ref having a pre-3.2 form.
Contains the deprecated 'region' instead of 'region_id'.
"""
ref = new_endpoint_ref(service_id, interface, region=region,
region_id='invalid', **kwargs)
del ref['region_id']
return ref
def new_domain_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'tags': [],
'options': {}
}
ref.update(kwargs)
return ref
def new_project_ref(domain_id=None, is_domain=False, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'domain_id': domain_id,
'is_domain': is_domain,
'tags': [],
'options': {}
}
# NOTE(henry-nash): We don't include parent_id in the initial list above
# since specifying it is optional depending on where the project sits in
# the hierarchy (and a parent_id of None has meaning - i.e. it's a top
# level project).
ref.update(kwargs)
return ref
def new_user_ref(domain_id, project_id=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True,
'domain_id': domain_id,
'email': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
}
if project_id:
ref['default_project_id'] = project_id
ref.update(kwargs)
return ref
def new_federated_user_ref(idp_id=None, protocol_id=None, **kwargs):
ref = {
'idp_id': idp_id or 'ORG_IDP',
'protocol_id': protocol_id or 'saml2',
'unique_id': uuid.uuid4().hex,
'display_name': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
def new_mapping_ref(mapping_id=None, rules=None, **kwargs):
ref = {
'id': mapping_id or uuid.uuid4().hex,
'rules': rules or []
}
ref.update(kwargs)
return ref
def new_protocol_ref(protocol_id=None, idp_id=None, mapping_id=None, **kwargs):
ref = {
'id': protocol_id or 'saml2',
'idp_id': idp_id or 'ORG_IDP',
'mapping_id': mapping_id or uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_identity_provider_ref(idp_id=None, **kwargs):
ref = {
'id': idp_id or 'ORG_IDP',
'enabled': True,
'description': '',
}
ref.update(kwargs)
return ref
def new_service_provider_ref(**kwargs):
ref = {
'auth_url': 'https://' + uuid.uuid4().hex + '.com',
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': 'https://' + uuid.uuid4().hex + '.com',
'relay_state_prefix': CONF.saml.relay_state_prefix
}
ref.update(kwargs)
return ref
def new_group_ref(domain_id, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': domain_id
}
ref.update(kwargs)
return ref
def new_credential_ref(user_id, project_id=None, type='cert', **kwargs):
ref = {
'id': uuid.uuid4().hex,
'user_id': user_id,
'type': type,
}
if project_id:
ref['project_id'] = project_id
if 'blob' not in kwargs:
ref['blob'] = uuid.uuid4().hex
ref.update(kwargs)
return ref
def new_cert_credential(user_id, project_id=None, blob=None, **kwargs):
if blob is None:
blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex}
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=json.dumps(blob),
type='cert',
**kwargs)
return blob, credential
def new_ec2_credential(user_id, project_id=None, blob=None, **kwargs):
if blob is None:
blob = {
'access': uuid.uuid4().hex,
'secret': uuid.uuid4().hex,
'trust_id': None
}
if 'id' not in kwargs:
access = blob['access'].encode('utf-8')
kwargs['id'] = hashlib.sha256(access).hexdigest()
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=json.dumps(blob),
type='ec2',
**kwargs)
return blob, credential
def new_totp_credential(user_id, project_id=None, blob=None):
if not blob:
# NOTE(notmorgan): 20 bytes of data from os.urandom for
# a totp secret.
blob = base64.b32encode(os.urandom(20)).decode('utf-8')
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=blob,
type='totp')
return credential
def new_application_credential_ref(roles=None,
name=None,
expires=None,
secret=None):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
if roles:
ref['roles'] = roles
if secret:
ref['secret'] = secret
if isinstance(expires, str):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = (
timeutils.utcnow() + datetime.timedelta(**expires)
).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
return ref
def new_role_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': None,
'options': {},
}
ref.update(kwargs)
return ref
def new_policy_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
# Store serialized JSON data as the blob to mimic real world usage.
'blob': json.dumps({'data': uuid.uuid4().hex, }),
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
def new_domain_config_ref(**kwargs):
ref = {
"identity": {
"driver": "ldap"
},
"ldap": {
"url": "ldap://myldap.com:389/",
"user_tree_dn": "ou=Users,dc=my_new_root,dc=org"
}
}
ref.update(kwargs)
return ref
def new_trust_ref(trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None, remaining_uses=None,
allow_redelegation=False, redelegation_count=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'trustor_user_id': trustor_user_id,
'trustee_user_id': trustee_user_id,
'impersonation': impersonation or False,
'project_id': project_id,
'remaining_uses': remaining_uses,
'allow_redelegation': allow_redelegation,
}
if isinstance(redelegation_count, int):
ref.update(redelegation_count=redelegation_count)
if isinstance(expires, str):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = (
timeutils.utcnow() + datetime.timedelta(**expires)
).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
role_ids = role_ids or []
role_names = role_names or []
if role_ids or role_names:
ref['roles'] = []
for role_id in role_ids:
ref['roles'].append({'id': role_id})
for role_name in role_names:
ref['roles'].append({'name': role_name})
ref.update(kwargs)
return ref
def new_registered_limit_ref(**kwargs):
ref = {
'service_id': uuid.uuid4().hex,
'resource_name': uuid.uuid4().hex,
'default_limit': 10,
'description': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_limit_ref(**kwargs):
ref = {
'service_id': uuid.uuid4().hex,
'resource_name': uuid.uuid4().hex,
'resource_limit': 10,
'description': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def create_user(api, domain_id, **kwargs):
"""Create a user via the API. Keep the created password.
The password is saved and restored when api.create_user() is called.
Only use this routine if there is a requirement for the user object to
have a valid password after api.create_user() is called.
"""
user = new_user_ref(domain_id=domain_id, **kwargs)
password = user['password']
user = api.create_user(user)
user['password'] = password
return user
def _assert_expected_status(f):
"""Add `expected_status_code` as an argument to the test_client methods.
`expected_status_code` must be passed as a kwarg.
"""
TEAPOT_HTTP_STATUS = 418
_default_expected_responses = {
'get': http.client.OK,
'head': http.client.OK,
'post': http.client.CREATED,
'put': http.client.NO_CONTENT,
'patch': http.client.OK,
'delete': http.client.NO_CONTENT,
}
@functools.wraps(f)
def inner(*args, **kwargs):
# Get the "expected_status_code" kwarg if supplied. If not supplied use
        # the `_default_expected_responses` mapping, or fall through to
# "HTTP OK" if the method is somehow unknown.
expected_status_code = kwargs.pop(
'expected_status_code',
_default_expected_responses.get(
f.__name__.lower(), http.client.OK))
response = f(*args, **kwargs)
# Logic to verify the response object is sane. Expand as needed
if response.status_code == TEAPOT_HTTP_STATUS:
# NOTE(morgan): We use 418 internally during tests to indicate
# an un-routed HTTP call was made. This allows us to avoid
# misinterpreting HTTP 404 from Flask and HTTP 404 from a
# resource that is not found (e.g. USER NOT FOUND) programmatically
raise AssertionError("I AM A TEAPOT(418): %s" % response.data)
if response.status_code != expected_status_code:
raise AssertionError(
'Expected HTTP Status does not match observed HTTP '
'Status: %(expected)s != %(observed)s (%(data)s)' % {
'expected': expected_status_code,
'observed': response.status_code,
'data': response.data})
# return the original response object
return response
return inner
class KeystoneFlaskTestClient(flask_testing.FlaskClient):
"""Subclass of flask.testing.FlaskClient implementing assertions.
Implements custom "expected" HTTP Status assertion for
GET/HEAD/PUT/PATCH/DELETE.
"""
@_assert_expected_status
def get(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).get(*args, **kwargs)
@_assert_expected_status
def head(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).head(*args, **kwargs)
@_assert_expected_status
def post(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).post(*args, **kwargs)
@_assert_expected_status
def patch(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).patch(*args, **kwargs)
@_assert_expected_status
def put(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).put(*args, **kwargs)
@_assert_expected_status
def delete(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).delete(*args, **kwargs)
class BaseTestCase(testtools.TestCase):
"""Light weight base test class.
This is a placeholder that will eventually go away once the
setup/teardown in TestCase is properly trimmed down to the bare
essentials. This is really just a play to speed up the tests by
eliminating unnecessary work.
"""
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.useFixture(fixtures.MockPatchObject(sys, 'exit',
side_effect=UnexpectedExit))
self.useFixture(log_fixture.get_logging_handle_error_fixture())
warnings.filterwarnings('error', category=DeprecationWarning,
module='^keystone\\.')
warnings.filterwarnings(
'ignore', category=DeprecationWarning,
message=r"Using function/method 'db_version\(\)' is deprecated")
warnings.simplefilter('error', exc.SAWarning)
if hasattr(exc, "RemovedIn20Warning"):
warnings.simplefilter('ignore', exc.RemovedIn20Warning)
self.addCleanup(warnings.resetwarnings)
# Ensure we have an empty threadlocal context at the start of each
# test.
self.assertIsNone(oslo_context.get_current())
self.useFixture(oslo_ctx_fixture.ClearRequestContext())
orig_debug_level = ldap.get_option(ldap.OPT_DEBUG_LEVEL)
self.addCleanup(ldap.set_option, ldap.OPT_DEBUG_LEVEL,
orig_debug_level)
orig_tls_cacertfile = ldap.get_option(ldap.OPT_X_TLS_CACERTFILE)
if orig_tls_cacertfile is None:
orig_tls_cacertfile = ''
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_CACERTFILE,
orig_tls_cacertfile)
orig_tls_cacertdir = ldap.get_option(ldap.OPT_X_TLS_CACERTDIR)
# Setting orig_tls_cacertdir to None is not allowed.
if orig_tls_cacertdir is None:
orig_tls_cacertdir = ''
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_CACERTDIR,
orig_tls_cacertdir)
orig_tls_require_cert = ldap.get_option(ldap.OPT_X_TLS_REQUIRE_CERT)
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_REQUIRE_CERT,
orig_tls_require_cert)
self.addCleanup(ks_ldap.PooledLDAPHandler.connection_pools.clear)
def cleanup_instance(self, *names):
"""Create a function suitable for use with self.addCleanup.
:returns: a callable that uses a closure to delete instance attributes
"""
def cleanup():
for name in names:
# TODO(dstanek): remove this 'if' statement once
# load_backend in test_backend_ldap is only called once
# per test
if hasattr(self, name):
delattr(self, name)
return cleanup
def skip_if_env_not_set(self, env_var):
if not os.environ.get(env_var):
self.skipTest('Env variable %s is not set.' % env_var)
def skip_test_overrides(self, *args, **kwargs):
if self._check_for_method_in_parents(self._testMethodName):
return super(BaseTestCase, self).skipTest(*args, **kwargs)
raise Exception('%r is not a previously defined test method'
% self._testMethodName)
def _check_for_method_in_parents(self, name):
# skip first to get to parents
for cls in self.__class__.__mro__[1:]:
if hasattr(cls, name):
return True
return False
def loadapp(self, name='public'):
app = flask_app.application_factory(name)
app.testing = True
app.test_client_class = KeystoneFlaskTestClient
# NOTE(morgan): any unexpected 404s, not handled by the routed apis,
        # are hard errors and should not pass testing.
def page_not_found_teapot(e):
content = (
'TEST PROGRAMMING ERROR - Reached a 404 from an unrouted (`%s`'
') path. Be sure the test is requesting the right resource '
'and that all blueprints are registered with the flask app.' %
flask.request.url)
return content, 418
app.register_error_handler(404, page_not_found_teapot)
self.test_client = app.test_client
self.test_request_context = app.test_request_context
self.cleanup_instance('test_request_context')
self.cleanup_instance('test_client')
return keystone_flask.setup_app_middleware(app)
class TestCase(BaseTestCase):
def config_files(self):
return []
def _policy_fixture(self):
return ksfixtures.Policy(self.config_fixture)
@contextlib.contextmanager
def make_request(self, path='/', **kwargs):
        # stand up a fake app and request context with a passed-in/known
# environment.
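        # Illustrative call (the path and flag are examples only):
        #     with self.make_request('/v3/users', is_admin=True):
        #         ...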
is_admin = kwargs.pop('is_admin', False)
environ = kwargs.setdefault('environ', {})
query_string = kwargs.pop('query_string', None)
if query_string:
# Make sure query string is properly added to the context
path = '{path}?{qs}'.format(path=path, qs=query_string)
if not environ.get(context.REQUEST_CONTEXT_ENV):
environ[context.REQUEST_CONTEXT_ENV] = context.RequestContext(
is_admin=is_admin,
authenticated=kwargs.pop('authenticated', True))
# Create a dummy flask app to work with
app = flask.Flask(__name__)
with app.test_request_context(path=path, environ_overrides=environ):
yield
def config_overrides(self):
# NOTE(morganfainberg): enforce config_overrides can only ever be
# called a single time.
assert self.__config_overrides_called is False
self.__config_overrides_called = True
signing_certfile = 'examples/pki/certs/signing_cert.pem'
signing_keyfile = 'examples/pki/private/signing_key.pem'
self.useFixture(self._policy_fixture())
self.config_fixture.config(
# TODO(morganfainberg): Make Cache Testing a separate test case
# in tempest, and move it out of the base unit tests.
group='cache',
backend='dogpile.cache.memory',
enabled=True,
proxies=['oslo_cache.testing.CacheIsolatingProxy'])
self.config_fixture.config(
group='catalog',
driver='sql',
template_file=dirs.tests('default_catalog.templates'))
self.config_fixture.config(
group='saml', certfile=signing_certfile, keyfile=signing_keyfile)
self.config_fixture.config(
default_log_levels=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'routes.middleware=INFO',
'stevedore.extension=INFO',
'keystone.notifications=INFO',
'keystone.identity.backends.ldap.common=INFO',
])
# NOTE(notmorgan): Set password rounds low here to ensure speedy
# tests. This is explicitly set because the tests here are not testing
# the integrity of the password hashing, just that the correct form
        # of hashing has been used. Note that 4 is the lowest value allowed
        # for bcrypt in the `[identity] password_hash_rounds` setting.
self.config_fixture.config(group='identity', password_hash_rounds=4)
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_receipts',
CONF.fernet_receipts.max_active_keys
)
)
def _assert_config_overrides_called(self):
assert self.__config_overrides_called is True
def setUp(self):
super(TestCase, self).setUp()
self.__config_overrides_called = False
self.__load_backends_called = False
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
self.addCleanup(delattr, self, 'config_fixture')
self.config(self.config_files())
# NOTE(morganfainberg): mock the auth plugin setup to use the config
# fixture which automatically unregisters options when performing
# cleanup.
def mocked_register_auth_plugin_opt(conf, opt):
self.config_fixture.register_opt(opt, group='auth')
self.useFixture(fixtures.MockPatchObject(
keystone.conf.auth, '_register_auth_plugin_opt',
new=mocked_register_auth_plugin_opt))
self.config_overrides()
# explicitly load auth configuration
keystone.conf.auth.setup_authentication()
# NOTE(morganfainberg): ensure config_overrides has been called.
self.addCleanup(self._assert_config_overrides_called)
self.useFixture(fixtures.FakeLogger(level=log.DEBUG))
# NOTE(morganfainberg): This code is a copy from the oslo-incubator
# log module. This is not in a function or otherwise available to use
        # without having a CONF object to set up logging. This should help to
# reduce the log size by limiting what we log (similar to how Keystone
# would run under mod_wsgi).
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = log.getLogger(mod)
logger.logger.setLevel(level_name)
self.useFixture(ksfixtures.Cache())
# Clear the registry of providers so that providers from previous
# tests aren't used.
self.addCleanup(provider_api.ProviderAPIs._clear_registry_instances)
# Clear the registry of JSON Home Resources
self.addCleanup(json_home.JsonHomeResources._reset)
# Ensure Notification subscriptions and resource types are empty
self.addCleanup(notifications.clear_subscribers)
self.addCleanup(notifications.reset_notifier)
def config(self, config_files):
sql.initialize()
CONF(args=[], project='keystone', default_config_files=config_files)
def load_backends(self):
"""Initialize each manager and assigns them to an attribute."""
# TODO(morgan): Ensure our tests only ever call load_backends
        # a single time via this method. For now, just clear the registry
# if we are reloading.
provider_api.ProviderAPIs._clear_registry_instances()
self.useFixture(ksfixtures.BackendLoader(self))
def load_fixtures(self, fixtures):
"""Hacky basic and naive fixture loading based on a python module.
Expects that the various APIs into the various services are already
defined on `self`.
"""
# NOTE(dstanek): create a list of attribute names to be removed
# from this instance during cleanup
fixtures_to_cleanup = []
# TODO(termie): doing something from json, probably based on Django's
# loaddata will be much preferred.
if (hasattr(self, 'identity_api') and
hasattr(self, 'assignment_api') and
hasattr(self, 'resource_api')):
try:
PROVIDERS.resource_api.create_domain(
resource_base.NULL_DOMAIN_ID, fixtures.ROOT_DOMAIN)
except exception.Conflict:
# the root domain already exists, skip now.
pass
for domain in fixtures.DOMAINS:
rv = PROVIDERS.resource_api.create_domain(domain['id'], domain)
attrname = 'domain_%s' % domain['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for project in fixtures.PROJECTS:
project_attr_name = 'project_%s' % project['name'].lower()
rv = PROVIDERS.resource_api.create_project(
project['id'], project)
setattr(self, project_attr_name, rv)
fixtures_to_cleanup.append(project_attr_name)
for role in fixtures.ROLES:
rv = PROVIDERS.role_api.create_role(role['id'], role)
attrname = 'role_%s' % role['name']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for user in fixtures.USERS:
user_copy = user.copy()
projects = user_copy.pop('projects')
# For users, the manager layer will generate the ID
user_copy = PROVIDERS.identity_api.create_user(user_copy)
# Our tests expect that the password is still in the user
# record so that they can reference it, so put it back into
# the dict returned.
user_copy['password'] = user['password']
# fixtures.ROLES[2] is the _member_ role.
for project_id in projects:
PROVIDERS.assignment_api.add_role_to_user_and_project(
user_copy['id'], project_id, fixtures.ROLES[2]['id'])
# Use the ID from the fixture as the attribute name, so
# that our tests can easily reference each user dict, while
# the ID in the dict will be the real public ID.
attrname = 'user_%s' % user['name']
setattr(self, attrname, user_copy)
fixtures_to_cleanup.append(attrname)
for role_assignment in fixtures.ROLE_ASSIGNMENTS:
role_id = role_assignment['role_id']
user = role_assignment['user']
project_id = role_assignment['project_id']
user_id = getattr(self, 'user_%s' % user)['id']
PROVIDERS.assignment_api.add_role_to_user_and_project(
user_id, project_id, role_id)
self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
def assertCloseEnoughForGovernmentWork(self, a, b, delta=3):
"""Assert that two datetimes are nearly equal within a small delta.
:param delta: Maximum allowable time delta, defined in seconds.
"""
if a == b:
# Short-circuit if the values are the same.
return
msg = '%s != %s within %s delta' % (a, b, delta)
self.assertLessEqual(abs(a - b).seconds, delta, msg)
def assertTimestampEqual(self, expected, value):
# Compare two timestamps but ignore the microseconds part
# of the expected timestamp. Keystone does not track microseconds and
        # is working to eliminate microseconds from the datetimes it uses.
expected = timeutils.parse_isotime(expected).replace(microsecond=0)
value = timeutils.parse_isotime(value).replace(microsecond=0)
self.assertEqual(
expected,
value,
"%s != %s" % (expected, value))
def assertNotEmpty(self, l):
self.assertGreater(len(l), 0)
def assertUserDictEqual(self, expected, observed, message=''):
"""Assert that a user dict is equal to another user dict.
User dictionaries have some variable values that should be ignored in
the comparison. This method is a helper that strips those elements out
        when comparing the user dictionary. This normalizes those differences
        so that they do not change the comparison.
"""
# NOTE(notmorgan): An empty option list is the same as no options being
# specified in the user_ref. This removes options if it is empty in
# observed if options is not specified in the expected value.
if ('options' in observed and not observed['options'] and
'options' not in expected):
observed = observed.copy()
del observed['options']
self.assertDictEqual(expected, observed, message)
@property
def ipv6_enabled(self):
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6)
# NOTE(Mouad): Try to bind to IPv6 loopback ip address.
sock.bind(("::1", 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
def skip_if_no_ipv6(self):
if not self.ipv6_enabled:
raise self.skipTest("IPv6 is not enabled in the system")
class SQLDriverOverrides(object):
"""A mixin for consolidating sql-specific test overrides."""
def config_overrides(self):
super(SQLDriverOverrides, self).config_overrides()
# SQL specific driver overrides
self.config_fixture.config(group='catalog', driver='sql')
self.config_fixture.config(group='identity', driver='sql')
self.config_fixture.config(group='policy', driver='sql')
self.config_fixture.config(group='trust', driver='sql')
| [((63, 11, 63, 49), 'os.path.join', 'os.path.join', ({(63, 24, 63, 32): 'TESTSDIR', (63, 34, 63, 48): '"""config_files"""'}, {}), "(TESTSDIR, 'config_files')", False, 'import os\n'), ((65, 9, 65, 40), 'os.path.join', 'os.path.join', ({(65, 22, 65, 29): 'ROOTDIR', (65, 31, 65, 39): '"""vendor"""'}, {}), "(ROOTDIR, 'vendor')", False, 'import os\n'), ((66, 9, 66, 37), 'os.path.join', 'os.path.join', ({(66, 22, 66, 29): 'ROOTDIR', (66, 31, 66, 36): '"""etc"""'}, {}), "(ROOTDIR, 'etc')", False, 'import os\n'), ((80, 0, 80, 26), 'oslo_log.log.register_options', 'log.register_options', ({(80, 21, 80, 25): 'CONF'}, {}), '(CONF)', False, 'from oslo_log import log\n'), ((90, 0, 90, 19), 'os.makedirs', 'os.makedirs', ({(90, 12, 90, 18): 'TMPDIR'}, {}), '(TMPDIR)', False, 'import os\n'), ((91, 0, 91, 38), 'atexit.register', 'atexit.register', ({(91, 16, 91, 29): 'shutil.rmtree', (91, 31, 91, 37): 'TMPDIR'}, {}), '(shutil.rmtree, TMPDIR)', False, 'import atexit\n'), ((61, 10, 61, 21), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n'), ((62, 27, 62, 52), 'os.path.abspath', 'os.path.abspath', ({(62, 43, 62, 51): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((64, 27, 64, 67), 'os.path.join', 'os.path.join', ({(64, 40, 64, 48): 'TESTSDIR', (64, 50, 64, 54): '""".."""', (64, 56, 64, 60): '""".."""', (64, 62, 64, 66): '""".."""'}, {}), "(TESTSDIR, '..', '..', '..')", False, 'import os\n'), ((70, 14, 70, 54), 'os.environ.get', 'os.environ.get', ({(70, 29, 70, 53): '"""KEYSTONE_TEST_TEMP_DIR"""'}, {}), "('KEYSTONE_TEST_TEMP_DIR')", False, 'import os\n'), ((73, 11, 73, 37), 'os.path.join', 'os.path.join', ({(73, 24, 73, 31): 'env_val', (73, 33, 73, 36): 'PID'}, {}), '(env_val, PID)', False, 'import os\n'), ((119, 7, 119, 25), 'os.path.exists', 'os.path.exists', ({(119, 22, 119, 24): 'db'}, {}), '(db)', False, 'import os\n'), ((122, 7, 122, 31), 'os.path.exists', 'os.path.exists', ({(122, 22, 122, 30): 'pristine'}, {}), '(pristine)', False, 'import os\n'), ((181, 5, 181, 23), 'functools.wraps', 'functools.wraps', ({(181, 21, 181, 22): 'f'}, {}), '(f)', False, 'import functools\n'), ((599, 5, 599, 23), 'functools.wraps', 'functools.wraps', ({(599, 21, 599, 22): 'f'}, {}), '(f)', False, 'import functools\n'), ((72, 15, 72, 49), 'os.path.join', 'os.path.join', ({(72, 28, 72, 36): 'TESTSDIR', (72, 38, 72, 43): '"""tmp"""', (72, 45, 72, 48): 'PID'}, {}), "(TESTSDIR, 'tmp', PID)", False, 'import os\n'), ((97, 15, 97, 40), 'os.path.join', 'os.path.join', ({(97, 28, 97, 35): 'ROOTDIR', (97, 37, 97, 39): '*p'}, {}), '(ROOTDIR, *p)', False, 'import os\n'), ((101, 15, 101, 39), 'os.path.join', 'os.path.join', ({(101, 28, 101, 34): 'ETCDIR', (101, 36, 101, 38): '*p'}, {}), '(ETCDIR, *p)', False, 'import os\n'), ((105, 15, 105, 41), 'os.path.join', 'os.path.join', ({(105, 28, 105, 36): 'TESTSDIR', (105, 38, 105, 40): '*p'}, {}), '(TESTSDIR, *p)', False, 'import os\n'), ((109, 15, 109, 39), 'os.path.join', 'os.path.join', ({(109, 28, 109, 34): 'TMPDIR', (109, 36, 109, 38): '*p'}, {}), '(TMPDIR, *p)', False, 'import os\n'), ((113, 15, 113, 41), 'os.path.join', 'os.path.join', ({(113, 28, 113, 36): 'TESTCONF', (113, 38, 113, 40): '*p'}, {}), '(TESTCONF, *p)', False, 'import os\n'), ((120, 8, 120, 21), 'os.unlink', 'os.unlink', ({(120, 18, 120, 20): 'db'}, {}), '(db)', False, 'import os\n'), ((123, 8, 123, 27), 'os.unlink', 'os.unlink', ({(123, 18, 123, 26): 'pristine'}, {}), '(pristine)', False, 'import os\n'), ((149, 9, 149, 27), 'functools.wraps', 'functools.wraps', ({(149, 25, 149, 26): 'f'}, {}), 
'(f)', False, 'import functools\n'), ((165, 9, 165, 27), 'functools.wraps', 'functools.wraps', ({(165, 25, 165, 26): 'f'}, {}), '(f)', False, 'import functools\n'), ((682, 8, 683, 54), 'warnings.filterwarnings', 'warnings.filterwarnings', (), '', False, 'import warnings\n'), ((684, 8, 686, 76), 'warnings.filterwarnings', 'warnings.filterwarnings', (), '', False, 'import warnings\n'), ((687, 8, 687, 53), 'warnings.simplefilter', 'warnings.simplefilter', ({(687, 30, 687, 37): '"""error"""', (687, 39, 687, 52): 'exc.SAWarning'}, {}), "('error', exc.SAWarning)", False, 'import warnings\n'), ((697, 27, 697, 64), 'ldap.get_option', 'ldap.get_option', ({(697, 43, 697, 63): 'ldap.OPT_DEBUG_LEVEL'}, {}), '(ldap.OPT_DEBUG_LEVEL)', False, 'import ldap\n'), ((700, 30, 700, 72), 'ldap.get_option', 'ldap.get_option', ({(700, 46, 700, 71): 'ldap.OPT_X_TLS_CACERTFILE'}, {}), '(ldap.OPT_X_TLS_CACERTFILE)', False, 'import ldap\n'), ((705, 29, 705, 70), 'ldap.get_option', 'ldap.get_option', ({(705, 45, 705, 69): 'ldap.OPT_X_TLS_CACERTDIR'}, {}), '(ldap.OPT_X_TLS_CACERTDIR)', False, 'import ldap\n'), ((711, 32, 711, 76), 'ldap.get_option', 'ldap.get_option', ({(711, 48, 711, 75): 'ldap.OPT_X_TLS_REQUIRE_CERT'}, {}), '(ldap.OPT_X_TLS_REQUIRE_CERT)', False, 'import ldap\n'), ((749, 14, 749, 49), 'keystone.server.flask.application.application_factory', 'flask_app.application_factory', ({(749, 44, 749, 48): 'name'}, {}), '(name)', True, 'from keystone.server.flask import application as flask_app\n'), ((769, 15, 769, 55), 'keystone.server.flask.core.setup_app_middleware', 'keystone_flask.setup_app_middleware', ({(769, 51, 769, 54): 'app'}, {}), '(app)', True, 'from keystone.server.flask import core as keystone_flask\n'), ((778, 15, 778, 53), 'keystone.tests.unit.ksfixtures.Policy', 'ksfixtures.Policy', ({(778, 33, 778, 52): 'self.config_fixture'}, {}), '(self.config_fixture)', False, 'from keystone.tests.unit import ksfixtures\n'), ((798, 14, 798, 35), 'flask.Flask', 'flask.Flask', ({(798, 26, 798, 34): '__name__'}, {}), '(__name__)', False, 'import flask\n'), ((917, 8, 917, 24), 'keystone.common.sql.initialize', 'sql.initialize', ({}, {}), '()', False, 'from keystone.common import sql\n'), ((925, 8, 925, 61), 'keystone.common.provider_api.ProviderAPIs._clear_registry_instances', 'provider_api.ProviderAPIs._clear_registry_instances', ({}, {}), '()', False, 'from keystone.common import provider_api\n'), ((185, 18, 185, 69), 'testtools.testcase.TestSkipped', 'testcase.TestSkipped', ({(185, 39, 185, 68): '"""No multiple domains support"""'}, {}), "('No multiple domains support')", False, 'from testtools import testcase\n'), ((196, 14, 196, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((197, 23, 197, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((206, 14, 206, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((207, 16, 207, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((208, 23, 208, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((210, 16, 210, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((223, 14, 223, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((224, 16, 224, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((225, 23, 225, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((232, 27, 232, 39), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((244, 14, 
244, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((245, 23, 245, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((247, 16, 247, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((267, 14, 267, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((268, 16, 268, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((269, 23, 269, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((280, 14, 280, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((281, 16, 281, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((282, 23, 282, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((299, 14, 299, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((300, 16, 300, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((303, 17, 303, 29), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((304, 20, 304, 32), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((316, 21, 316, 33), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((317, 24, 317, 36), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((356, 23, 356, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((366, 14, 366, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((367, 16, 367, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((368, 23, 368, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((377, 14, 377, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((385, 22, 385, 34), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((397, 41, 397, 57), 'json.dumps', 'json.dumps', ({(397, 52, 397, 56): 'blob'}, {}), '(blob)', False, 'import json\n'), ((417, 41, 417, 57), 'json.dumps', 'json.dumps', ({(417, 52, 417, 56): 'blob'}, {}), '(blob)', False, 'import json\n'), ((440, 14, 440, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((441, 16, 441, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((442, 23, 442, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((465, 14, 465, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((466, 16, 466, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((467, 23, 467, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((477, 14, 477, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((478, 16, 478, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((479, 23, 479, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((483, 16, 483, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((509, 14, 509, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((547, 22, 547, 34), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((548, 25, 548, 37), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((550, 23, 550, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((559, 22, 559, 34), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((560, 25, 560, 37), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import 
uuid\n'), ((562, 23, 562, 35), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((675, 24, 675, 49), 'fixtures.NestedTempfile', 'fixtures.NestedTempfile', ({}, {}), '()', False, 'import fixtures\n'), ((676, 24, 676, 46), 'fixtures.TempHomeDir', 'fixtures.TempHomeDir', ({}, {}), '()', False, 'import fixtures\n'), ((678, 24, 679, 76), 'fixtures.MockPatchObject', 'fixtures.MockPatchObject', (), '', False, 'import fixtures\n'), ((680, 24, 680, 70), 'oslo_log.fixture.get_logging_handle_error_fixture', 'log_fixture.get_logging_handle_error_fixture', ({}, {}), '()', True, 'from oslo_log import fixture as log_fixture\n'), ((689, 12, 689, 67), 'warnings.simplefilter', 'warnings.simplefilter', ({(689, 34, 689, 42): '"""ignore"""', (689, 44, 689, 66): 'exc.RemovedIn20Warning'}, {}), "('ignore', exc.RemovedIn20Warning)", False, 'import warnings\n'), ((694, 26, 694, 52), 'oslo_context.context.get_current', 'oslo_context.get_current', ({}, {}), '()', True, 'from oslo_context import context as oslo_context\n'), ((695, 24, 695, 62), 'oslo_context.fixture.ClearRequestContext', 'oslo_ctx_fixture.ClearRequestContext', ({}, {}), '()', True, 'from oslo_context import fixture as oslo_ctx_fixture\n'), ((732, 15, 732, 38), 'os.environ.get', 'os.environ.get', ({(732, 30, 732, 37): 'env_var'}, {}), '(env_var)', False, 'import os\n'), ((850, 12, 854, 13), 'keystone.tests.unit.ksfixtures.KeyRepository', 'ksfixtures.KeyRepository', ({(851, 16, 851, 35): 'self.config_fixture', (852, 16, 852, 31): '"""fernet_tokens"""', (853, 16, 853, 50): 'CONF.fernet_tokens.max_active_keys'}, {}), "(self.config_fixture, 'fernet_tokens', CONF.\n fernet_tokens.max_active_keys)", False, 'from keystone.tests.unit import ksfixtures\n'), ((858, 12, 862, 13), 'keystone.tests.unit.ksfixtures.KeyRepository', 'ksfixtures.KeyRepository', ({(859, 16, 859, 35): 'self.config_fixture', (860, 16, 860, 33): '"""fernet_receipts"""', (861, 16, 861, 52): 'CONF.fernet_receipts.max_active_keys'}, {}), "(self.config_fixture, 'fernet_receipts', CONF.\n fernet_receipts.max_active_keys)", False, 'from keystone.tests.unit import ksfixtures\n'), ((872, 46, 872, 73), 'oslo_config.fixture.Config', 'config_fixture.Config', ({(872, 68, 872, 72): 'CONF'}, {}), '(CONF)', True, 'from oslo_config import fixture as config_fixture\n'), ((881, 24, 883, 48), 'fixtures.MockPatchObject', 'fixtures.MockPatchObject', (), '', False, 'import fixtures\n'), ((891, 24, 891, 60), 'fixtures.FakeLogger', 'fixtures.FakeLogger', (), '', False, 'import fixtures\n'), ((900, 21, 900, 39), 'oslo_log.log.getLogger', 'log.getLogger', ({(900, 35, 900, 38): 'mod'}, {}), '(mod)', False, 'from oslo_log import log\n'), ((903, 24, 903, 42), 'keystone.tests.unit.ksfixtures.Cache', 'ksfixtures.Cache', ({}, {}), '()', False, 'from keystone.tests.unit import ksfixtures\n'), ((926, 24, 926, 54), 'keystone.tests.unit.ksfixtures.BackendLoader', 'ksfixtures.BackendLoader', ({(926, 49, 926, 53): 'self'}, {}), '(self)', False, 'from keystone.tests.unit import ksfixtures\n'), ((152, 22, 152, 70), 'testtools.testcase.TestSkipped', 'testcase.TestSkipped', ({(152, 43, 152, 69): '"""Cache globally disabled."""'}, {}), "('Cache globally disabled.')", False, 'from testtools import testcase\n'), ((325, 28, 325, 40), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((336, 36, 336, 48), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((393, 26, 393, 38), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((393, 54, 393, 66), 'uuid.uuid4', 
'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((406, 22, 406, 34), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((407, 22, 407, 34), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((413, 23, 413, 45), 'hashlib.sha256', 'hashlib.sha256', ({(413, 38, 413, 44): 'access'}, {}), '(access)', False, 'import hashlib\n'), ((1019, 19, 1019, 52), 'oslo_utils.timeutils.parse_isotime', 'timeutils.parse_isotime', ({(1019, 43, 1019, 51): 'expected'}, {}), '(expected)', False, 'from oslo_utils import timeutils\n'), ((1020, 16, 1020, 46), 'oslo_utils.timeutils.parse_isotime', 'timeutils.parse_isotime', ({(1020, 40, 1020, 45): 'value'}, {}), '(value)', False, 'from oslo_utils import timeutils\n'), ((1052, 23, 1052, 53), 'socket.socket', 'socket.socket', ({(1052, 37, 1052, 52): 'socket.AF_INET6'}, {}), '(socket.AF_INET6)', False, 'import socket\n'), ((228, 28, 228, 40), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((354, 33, 354, 45), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((357, 31, 357, 43), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((427, 32, 427, 46), 'os.urandom', 'os.urandom', ({(427, 43, 427, 45): '20'}, {}), '(20)', False, 'import os\n'), ((482, 36, 482, 48), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((157, 30, 157, 78), 'testtools.testcase.TestSkipped', 'testcase.TestSkipped', ({(157, 51, 157, 77): "('%s caching disabled.' % s)"}, {}), "('%s caching disabled.' % s)", False, 'from testtools import testcase\n'), ((453, 12, 453, 30), 'oslo_utils.timeutils.utcnow', 'timeutils.utcnow', ({}, {}), '()', False, 'from oslo_utils import timeutils\n'), ((453, 33, 453, 62), 'datetime.timedelta', 'datetime.timedelta', ({}, {}), '(**expires)', False, 'import datetime\n'), ((525, 12, 525, 30), 'oslo_utils.timeutils.utcnow', 'timeutils.utcnow', ({}, {}), '()', False, 'from oslo_utils import timeutils\n'), ((525, 33, 525, 62), 'datetime.timedelta', 'datetime.timedelta', ({}, {}), '(**expires)', False, 'import datetime\n'), ((172, 34, 173, 57), 'testtools.testcase.TestSkipped', 'testcase.TestSkipped', ({(172, 55, 173, 56): "('%s caching enabled.' % s)"}, {}), "('%s caching enabled.' % s)", False, 'from testtools import testcase\n')] |
sneelco/PyISY | PyISY/Nodes/__init__.py | f1f916cd7951b1b6a5235bb36444c695fe3294e1 |
from .group import Group
from .node import (Node, parse_xml_properties, ATTR_ID)
from time import sleep
from xml.dom import minidom
class Nodes(object):
"""
This class handles the ISY nodes. This class can be used as a dictionary to
navigate through the controller's structure to objects of type
:class:`~PyISY.Nodes.Node` and :class:`~PyISY.Nodes.Group` that represent
objects on the controller.
| parent: ISY class
| root: [optional] String representing the current navigation level's ID
| nids: [optional] list of node ids
| nnames: [optional] list of node names
| nparents: [optional] list of node parents
| nobjs: [optional] list of node objects
| ntypes: [optional] list of node types
| xml: [optional] String of xml data containing the configuration data
:ivar allLowerNodes: Returns all nodes beneath current level
:ivar children: A list of the object's children.
:ivar hasChildren: Indicates if object has children
:ivar name: The name of the current folder in navigation.
"""
nids = []
nnames = []
nparents = []
nobjs = []
ntypes = []
def __init__(self, parent, root=None, nids=None, nnames=None,
nparents=None, nobjs=None, ntypes=None, xml=None):
self.parent = parent
self.root = root
if nids is not None and nnames is not None and nparents is not None \
and nobjs is not None and ntypes is not None:
self.nids = nids
self.nnames = nnames
self.nparents = nparents
self.nobjs = nobjs
self.ntypes = ntypes
elif xml is not None:
self.parse(xml)
def __str__(self):
""" Returns string representation of the nodes/folders/groups. """
if self.root is None:
return 'Folder <root>'
else:
ind = self.nids.index(self.root)
if self.ntypes[ind] == 'folder':
return 'Folder (' + self.root + ')'
elif self.ntypes[ind] == 'group':
return 'Group (' + self.root + ')'
else:
return 'Node (' + self.root + ')'
def __repr__(self):
""" Creates a pretty representation of the nodes/folders/groups. """
# get and sort children
folders = []
groups = []
nodes = []
for child in self.children:
            if child[0] == 'folder':
                folders.append(child)
            elif child[0] == 'group':
                groups.append(child)
            elif child[0] == 'node':
                nodes.append(child)
# initialize data
folders.sort(key=lambda x: x[1])
groups.sort(key=lambda x: x[1])
nodes.sort(key=lambda x: x[1])
out = str(self) + '\n' + self.__reprFolders__(folders) + \
self.__reprGroups__(groups) + self.__reprNodes__(nodes)
return out
def __reprFolders__(self, folders):
# format folders
out = ''
for fold in folders:
fold_obj = self[fold[2]]
out += ' + ' + fold[1] + ': Folder(' + fold[2] + ')\n'
for line in repr(fold_obj).split('\n')[1:]:
if len(line) > 0:
out += ' | ' + line + '\n'
out += ' -\n'
return out
def __reprGroups__(self, groups):
# format groups
out = ''
for group in groups:
out += ' ' + group[1] + ': Group(' + group[2] + ')\n'
return out
def __reprNodes__(self, nodes):
# format nodes
out = ''
for node in nodes:
node_obj = self[node[2]]
if node_obj.hasChildren:
out += ' + '
else:
out += ' '
out += node[1] + ': Node(' + node[2] + ')\n'
if node_obj.hasChildren:
for line in repr(node_obj).split('\n')[1:]:
if len(line) > 0:
out += ' | ' + line + '\n'
out += ' -\n'
return out
def __iter__(self):
"""
Returns an iterator for each node below the current navigation level.
"""
iter_data = self.allLowerNodes
return NodeIterator(self, iter_data, delta=1)
def __reversed__(self):
""" Returns the iterator in reverse order. """
iter_data = self.allLowerNodes
return NodeIterator(self, iter_data, delta=-1)
def _upmsg(self, xmldoc):
"""Updates nodes from event stream message."""
nid = xmldoc.getElementsByTagName('node')[0].firstChild.toxml()
nval = int(xmldoc.getElementsByTagName('action')[0].firstChild.toxml())
ctrl = xmldoc.getElementsByTagName('control')[0].firstChild.toxml()
try:
if ctrl == 'ST':
self.getByID(nid).status.update(nval, force=True, silent=True)
self.parent.log.info('ISY Updated Node: ' + nid)
else:
nid = '{}_{}'.format(nid, ctrl)
status = self.getByID(nid).status
status.update(nval, force=True, silent=True)
self.parent.log.info('ISY Updated Property: ' + nid)
except ValueError:
            self.parent.log.warning('Unable to find node: ' + nid)
def _controlmsg(self, xmldoc):
"""Passes Control events from an event stream message to nodes, for
sending out to subscribers."""
try:
nid = xmldoc.getElementsByTagName('node')[0].firstChild.toxml()
cntrl = xmldoc.getElementsByTagName('control')[0].firstChild.toxml()
except IndexError:
# If there is no node associated with the control message we ignore it
return
self.getByID(nid).controlEvents.notify(cntrl)
self.parent.log.info('ISY Node Control Event: ' + nid + ' ' + cntrl)
def parse(self, xml):
"""
Parses the xml data.
| xml: String of the xml data
"""
try:
xmldoc = minidom.parseString(xml)
except:
self.parent.log.error('ISY Could not parse nodes, '
+ 'poorly formatted XML.')
else:
# get nodes
ntypes = ['folder', 'node', 'group']
for ntype in ntypes:
features = xmldoc.getElementsByTagName(ntype)
for feature in features:
nid = feature.getElementsByTagName('address')[0] \
.firstChild.toxml()
nname = feature.getElementsByTagName('name')[0] \
.firstChild.toxml()
try:
nparent = feature.getElementsByTagName('parent')[0] \
.firstChild.toxml()
except IndexError:
nparent = None
try:
parent_nid = feature.getElementsByTagName('pnode')[0] \
.firstChild.toxml()
except IndexError:
parent_nid = None
try:
type = feature.getElementsByTagName('type')[0] \
.firstChild.toxml()
except IndexError:
type = None
try:
nodeDefId = feature.attributes['nodeDefId'].value
except KeyError:
nodeDefId = None
if ntype == 'folder':
self.insert(nid, nname, nparent, None, ntype)
elif ntype == 'node':
node_xml = self.parent.conn.getNode(nid)
node_doc = minidom.parseString(node_xml) # type: xml.dom.minidom.Document
node = node_doc.getElementsByTagName('node')[0]
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(node_doc)
dimmable = '%' in state_uom
self.insert(nid, nname, nparent,
Node(self, nid, state_val, nname,
dimmable,
uom=state_uom, prec=state_prec,
aux_properties=aux_props,
node_def_id=nodeDefId,
parent_nid=parent_nid,
type=type),
ntype)
for id, prop in aux_props.items():
if id == 'ST':
continue
prop_id = '{}_{}'.format(nid, id)
prop_name = '{} {}'.format(nname, id)
self.insert(prop_id, prop_name, nparent,
Node(self, prop_id, prop['value'],
prop_name, False,
uom=prop['uom'],
prec=prop['prec']),
'property')
elif ntype == 'group':
flag = feature.attributes['flag'].value
                        # Ignore groups that have the 0x08 bit set in the flag, since that is an
                        # ISY scene containing every device/scene. It would include scenes we have
                        # not seen yet (so they are not defined), and in newer ISY firmware
                        # (5.0.6+) it also includes the ISY MAC address.
if int(flag) & 0x08:
self.parent.log.info('Skipping group flag=' + flag + " " + nid )
else:
mems = feature.getElementsByTagName('link')
# Build list of members
members = [mem.firstChild.nodeValue for mem in mems]
# Build list of controllers
controllers = []
for mem in mems:
if int(mem.attributes['type'].value) == 16:
controllers.append(mem.firstChild.nodeValue)
self.insert(nid, nname, nparent,
Group(self, nid, nname, members, controllers), ntype)
self.parent.log.info('ISY Loaded Nodes')
def update(self, waitTime=0):
"""
Updates the contents of the class
| waitTime: [optional] Amount of seconds to wait before updating
"""
sleep(waitTime)
xml = self.parent.conn.updateNodes()
if xml is not None:
try:
xmldoc = minidom.parseString(xml)
except:
self.parent.log.error('ISY Could not parse nodes, '
+ 'poorly formatted XML.')
else:
for feature in xmldoc.getElementsByTagName('node'):
nid = feature.attributes['id'].value
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(feature)
dimmable = '%' in state_uom
if nid in self.nids:
node = self.getByID(nid)
node.uom = state_uom
node.prec = state_prec
node.dimmable = dimmable
node.status.update(state_val, silent=True)
if len(node.aux_properties) > 0:
node_xml = self.parent.conn.getNode(nid)
node_doc = minidom.parseString(node_xml)
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(node_doc)
for key in aux_props.keys():
pid = '{}_{}'.format(nid, key)
prop = self.getByID(pid)
                                prop.status.update(aux_props[key]['value'])
else:
                        node = Node(self, nid, state_val, ' ', dimmable,
                                    uom=state_uom, prec=state_prec,
                                    aux_properties=aux_props)
                        self.insert(nid, ' ', None, node, 'node')
self.parent.log.info('ISY Updated Nodes')
else:
self.parent.log.warning('ISY Failed to update nodes.')
def insert(self, nid, nname, nparent, nobj, ntype):
"""
Inserts a new node into the lists.
| nid: node id
| nname: node name
| nparent: node parent
| nobj: node object
| ntype: node type
"""
self.nids.append(nid)
self.nnames.append(nname)
self.nparents.append(nparent)
self.ntypes.append(ntype)
self.nobjs.append(nobj)
def __getitem__(self, val):
"""
Used for navigating through the node tree. Can take names or IDs.
"""
try:
self.nids.index(val)
fun = self.getByID
except ValueError:
try:
self.nnames.index(val)
fun = self.getByName
except ValueError:
try:
val = int(val)
fun = self.getByInd
except ValueError:
fun = None
        if fun:
            output = None
            try:
                output = fun(val)
            except Exception:
                pass
            if output:
                return output
        raise KeyError('Unrecognized Key: [{}]'.format(val))
    def __setitem__(self, key, value):
return None
def getByName(self, val):
"""
Gets child object with the given name.
| val: String representing name to look for.
"""
for i in range(len(self.nids)):
if self.nparents[i] == self.root and self.nnames[i] == val:
return self.getByInd(i)
def getByID(self, nid):
"""
Gets object with the given ID.
| nid: Integer representing node/group/folder id.
"""
i = self.nids.index(nid)
return self.getByInd(i)
def getByInd(self, i):
"""
Returns the object at the given index in the list.
| i: Integer representing index of node/group/folder.
"""
if self.ntypes[i] in ['group', 'node', 'property']:
return self.nobjs[i]
return Nodes(self.parent, self.nids[i], self.nids, self.nnames,
self.nparents, self.nobjs, self.ntypes)
def parseNotes(self, notes_xml):
spoken = None
if notes_xml is not None and notes_xml != "":
try:
notesdom = minidom.parseString(notes_xml)
except:
self.parent.log.error('ISY Could not parse node, notes '
+ 'poorly formatted XML: ' + notes_xml)
else:
spoken_tag = notesdom.getElementsByTagName('spoken')
if spoken_tag and len(spoken_tag) > 0 and spoken_tag[0].firstChild is not None:
spoken = spoken_tag[0].firstChild.toxml()
return { "spoken": spoken }
@property
def children(self):
out = []
for i in range(len(self.nids)):
if self.nparents[i] == self.root:
out.append((self.ntypes[i], self.nnames[i], self.nids[i]))
return out
@property
def hasChildren(self):
try:
self.nparents.index(self.root)
return True
except:
return False
@property
def name(self):
if self.root is None:
return ''
else:
ind = self.nids.index(self.root)
return self.nnames[ind]
@property
def allLowerNodes(self):
output = []
myname = self.name + '/'
for dtype, name, ident in self.children:
if dtype in ['group', 'node', 'property']:
output.append((dtype, myname + name, ident))
else:
output += [(dtype2, myname + name2, ident2)
for (dtype2, name2, ident2)
in self[ident].allLowerNodes]
return output
class NodeIterator(object):
""" Iterates through a list of nodes, returning node objects. """
def __init__(self, parent, iter_data, delta=1):
self._parent = parent
self._iterdata = iter_data
self._len = len(iter_data)
self._delta = delta
if delta > 0:
self._ind = 0
else:
self._ind = self._len - 1
def __next__(self):
if self._ind >= self._len or self._ind < 0:
raise StopIteration
_, path, ident = self._iterdata[self._ind]
self._ind += self._delta
return (path, self._parent[ident])
def __len__(self):
return self._len
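# Illustrative usage sketch (not part of the original module): NodeIterator only
# needs an indexable parent and (type, path, id) tuples, so a plain dict is
# enough to show how it walks a flattened node list in order.
if __name__ == '__main__':
    _fake_parent = {'n1': 'node-object-1', 'n2': 'node-object-2'}
    _walk = NodeIterator(_fake_parent, [('node', 'root/a', 'n1'),
                                        ('node', 'root/b', 'n2')])
    print(next(_walk))  # ('root/a', 'node-object-1')
    print(next(_walk))  # ('root/b', 'node-object-2')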
| [((277, 8, 277, 23), 'time.sleep', 'sleep', ({(277, 14, 277, 22): 'waitTime'}, {}), '(waitTime)', False, 'from time import sleep\n'), ((175, 21, 175, 45), 'xml.dom.minidom.parseString', 'minidom.parseString', ({(175, 41, 175, 44): 'xml'}, {}), '(xml)', False, 'from xml.dom import minidom\n'), ((281, 25, 281, 49), 'xml.dom.minidom.parseString', 'minidom.parseString', ({(281, 45, 281, 48): 'xml'}, {}), '(xml)', False, 'from xml.dom import minidom\n'), ((404, 27, 404, 57), 'xml.dom.minidom.parseString', 'minidom.parseString', ({(404, 47, 404, 56): 'notes_xml'}, {}), '(notes_xml)', False, 'from xml.dom import minidom\n'), ((217, 35, 217, 64), 'xml.dom.minidom.parseString', 'minidom.parseString', ({(217, 55, 217, 63): 'node_xml'}, {}), '(node_xml)', False, 'from xml.dom import minidom\n'), ((304, 39, 304, 68), 'xml.dom.minidom.parseString', 'minidom.parseString', ({(304, 59, 304, 67): 'node_xml'}, {}), '(node_xml)', False, 'from xml.dom import minidom\n')] |
easyScience/easyCore | easyCore/Utils/Logging.py | 5d16d5b27803277d0c44886f94dab599f764ae0b | # SPDX-FileCopyrightText: 2021 easyCore contributors <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyCore project <https://github.com/easyScience/easyCore>
__author__ = 'github.com/wardsimon'
__version__ = '0.1.0'
import logging
class Logger:
def __init__(self, log_level: int = logging.INFO):
self.logger = logging.getLogger(__name__)
self.level = log_level
self.logger.setLevel(self.level)
    def getLogger(self, logger_name, color: str = '32', defaults: bool = True) -> logging.Logger:
"""
Create a logger
        :param color: ANSI colour code used when formatting this logger's output
:param logger_name: logger name. Usually __name__ on creation
        :param defaults: whether to also associate any existing file log handlers with this logger
:return: A logger
"""
logger = logging.getLogger(logger_name)
logger.setLevel(self.level)
# self.applyLevel(logger)
# for handler_type in self._handlers:
# for handler in self._handlers[handler_type]:
# if handler_type == 'sys' or defaults:
# handler.formatter._fmt = self._makeColorText(color)
# logger.addHandler(handler)
# logger.propagate = False
# self._loggers.append(logger)
return logger
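if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not from the original file): a single
    # Logger instance hands out module loggers that share its log level.
    log_factory = Logger(log_level=logging.DEBUG)
    demo_log = log_factory.getLogger('easyCore.demo')
    demo_log.debug('easyCore logging configured at DEBUG level')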
| [((13, 22, 13, 49), 'logging.getLogger', 'logging.getLogger', ({(13, 40, 13, 48): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((25, 17, 25, 47), 'logging.getLogger', 'logging.getLogger', ({(25, 35, 25, 46): 'logger_name'}, {}), '(logger_name)', False, 'import logging\n')] |
mustx1/MYIQ | iqoptionapi/http/billing.py | 3afb597aa8a8abc278b7d70dad46af81789eae3e | """Module for IQ option billing resource."""
from iqoptionapi.http.resource import Resource
class Billing(Resource):
"""Class for IQ option billing resource."""
# pylint: disable=too-few-public-methods
url = "billing"
| [] |
honewatson/defaults | defaultsob/core.py | c6a845ec1f25fc82e7645dfee60dd2df1cfa4e81 | # -*- coding: utf-8 -*-
def ordered_set(iter):
"""Creates an ordered set
@param iter: list or tuple
@return: list with unique values
"""
final = []
for i in iter:
if i not in final:
final.append(i)
return final
def class_slots(ob):
"""Get object attributes from child class attributes
@param ob: Defaults object
@type ob: Defaults
@return: Tuple of slots
"""
current_class = type(ob).__mro__[0]
if not getattr(current_class, 'allslots', None) \
and current_class != object:
_allslots = [list(getattr(cls, '__slots__', []))
for cls in type(ob).__mro__]
_fslots = []
for slot in _allslots:
_fslots = _fslots + slot
current_class.allslots = tuple(ordered_set(_fslots))
return current_class.allslots
def use_if_none_cls(alternative_attr):
def use_if_none(original_attr, ob, kwargs):
"""
Try and get a value from kwargs for original_attr. If there
is no original_attr in kwargs use the alternative_attr value
in the object ob
@param alternative_attr: the alternative attribute
@param original_attr: the original attribute
@param ob: the object with the attributes
@param kwargs: key values
@return: final value
"""
return kwargs.get(original_attr, getattr(ob, alternative_attr, None))
return use_if_none
def usef(attr):
"""Use another value as default
@param attr: the name of the attribute to
use as alternative value
@return: value of alternative attribute
"""
return use_if_none_cls(attr)
use_name_if_none = usef('Name')
def choose_alt(attr, ob, kwargs):
"""If the declared class attribute of ob is callable
then use that callable to get a default ob
instance value if a value is not available in kwargs.
@param attr: ob class attribute name
@param ob: the object instance whose default value needs to be set
@param kwargs: the kwargs values passed to the ob __init__ method
@return: value to be used to set ob instance
"""
result = ob.__class__.__dict__.get(attr, None)
if type(result).__name__ == "member_descriptor":
result = None
elif callable(result):
result = result(attr, ob, kwargs)
return result
class Defaults(object):
"""A base class which allows using slots to define
attributes and the ability to set object
instance defaults at the child class level"""
def __init__(self, **kwargs):
"""Assign kwargs to attributes and defaults to attributes"""
allslots = class_slots(self)
for attr in allslots:
setattr(self, attr, kwargs.get(
attr, choose_alt(attr, self, kwargs)))
def to_dict(self):
"""Returns attributes with values as dict
@return: dictionary of attributes with values
"""
allslots = class_slots(self)
return {
item: getattr(self, item, None)
for item in allslots
}
def to_dict_clean(self):
"""Return a dict where there values of None
are not included
@return: dict of the object properties with values
"""
attribs = self.to_dict()
return {
k: v
for k, v in attribs.items() if v
}
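# Illustrative usage sketch (not part of the original module): __slots__ live on
# one class and the callable default on a subclass, because Python forbids a
# class defining both a slot and a class attribute of the same name.
class PersonSlots(Defaults):
    __slots__ = ('Name', 'UserName')
class Person(PersonSlots):
    UserName = use_name_if_none  # falls back to the value passed for 'Name'
if __name__ == '__main__':
    print(Person(Name='ada').to_dict())  # {'Name': 'ada', 'UserName': 'ada'}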
| [] |
item4/yui | tests/bot_test.py | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | import asyncio
from collections import defaultdict
from datetime import timedelta
import pytest
from yui.api import SlackAPI
from yui.bot import Bot
from yui.box import Box
from yui.types.slack.response import APIResponse
from yui.utils import json
from .util import FakeImportLib
def test_bot_init(event_loop, monkeypatch, bot_config):
importlib = FakeImportLib()
monkeypatch.setattr('importlib.import_module', importlib.import_module)
bot_config.APPS = ['yui.app1', 'yui.app2']
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
assert bot.config == bot_config
assert bot.channels == []
assert bot.ims == []
assert bot.groups == []
assert bot.restart is False
assert isinstance(bot.api, SlackAPI)
assert bot.box is box
assert isinstance(bot.queue, asyncio.Queue)
assert importlib.import_queue == [
'yui.app1',
'yui.app2',
]
@pytest.mark.asyncio
async def test_call(event_loop, bot_config, response_mock):
token = 'asdf1234'
response_mock.post(
'https://slack.com/api/test11',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test12',
body=json.dumps({'res': 'hello world!', 'data': {'extra': 'wow'}}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test21',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test22',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test3',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
bot.api.throttle_interval = defaultdict(lambda: timedelta(0))
res = await bot.call('test11')
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test12', data={'extra': 'wow'})
assert res == APIResponse(
body={'res': 'hello world!', 'data': {'extra': 'wow'}},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test21')
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test22', data={'extra': 'wow'})
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test3', token=token)
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
| [((22, 10, 22, 15), 'yui.box.Box', 'Box', ({}, {}), '()', False, 'from yui.box import Box\n'), ((23, 10, 23, 52), 'yui.bot.Bot', 'Bot', (), '', False, 'from yui.bot import Bot\n'), ((75, 10, 75, 15), 'yui.box.Box', 'Box', ({}, {}), '()', False, 'from yui.box import Box\n'), ((76, 10, 76, 52), 'yui.bot.Bot', 'Bot', (), '', False, 'from yui.bot import Bot\n'), ((80, 18, 84, 5), 'yui.types.slack.response.APIResponse', 'APIResponse', (), '', False, 'from yui.types.slack.response import APIResponse\n'), ((87, 18, 91, 5), 'yui.types.slack.response.APIResponse', 'APIResponse', (), '', False, 'from yui.types.slack.response import APIResponse\n'), ((94, 18, 98, 5), 'yui.types.slack.response.APIResponse', 'APIResponse', (), '', False, 'from yui.types.slack.response import APIResponse\n'), ((101, 18, 105, 5), 'yui.types.slack.response.APIResponse', 'APIResponse', (), '', False, 'from yui.types.slack.response import APIResponse\n'), ((108, 18, 112, 5), 'yui.types.slack.response.APIResponse', 'APIResponse', (), '', False, 'from yui.types.slack.response import APIResponse\n'), ((45, 13, 45, 48), 'yui.utils.json.dumps', 'json.dumps', ({(45, 24, 45, 47): "{'res': 'hello world!'}"}, {}), "({'res': 'hello world!'})", False, 'from yui.utils import json\n'), ((51, 13, 51, 74), 'yui.utils.json.dumps', 'json.dumps', ({(51, 24, 51, 73): "{'res': 'hello world!', 'data': {'extra': 'wow'}}"}, {}), "({'res': 'hello world!', 'data': {'extra': 'wow'}})", False, 'from yui.utils import json\n'), ((58, 13, 58, 41), 'yui.utils.json.dumps', 'json.dumps', ({(58, 24, 58, 40): "{'error': 'aaa'}"}, {}), "({'error': 'aaa'})", False, 'from yui.utils import json\n'), ((64, 13, 64, 41), 'yui.utils.json.dumps', 'json.dumps', ({(64, 24, 64, 40): "{'error': 'aaa'}"}, {}), "({'error': 'aaa'})", False, 'from yui.utils import json\n'), ((70, 13, 70, 48), 'yui.utils.json.dumps', 'json.dumps', ({(70, 24, 70, 47): "{'res': 'hello world!'}"}, {}), "({'res': 'hello world!'})", False, 'from yui.utils import json\n'), ((77, 52, 77, 64), 'datetime.timedelta', 'timedelta', ({(77, 62, 77, 63): '0'}, {}), '(0)', False, 'from datetime import timedelta\n')] |
CesMak/aruco_detector_ocv | scripts/marker_filter.py | bb45e39664247779cbbbc8d37b89c4556b4984d6 | #!/usr/bin/env python
import numpy as np
import rospy
import geometry_msgs.msg
import tf2_ros
from tf.transformations import quaternion_slerp
def translation_to_numpy(t):
return np.array([t.x, t.y, t.z])
def quaternion_to_numpy(q):
return np.array([q.x, q.y, q.z, q.w])
if __name__ == '__main__':
rospy.init_node('marker_filter')
alpha = rospy.get_param('~alpha', 0.9)
parent_frame_id = rospy.get_param('~parent_frame_id', 'kinect2_link')
marker_id = rospy.get_param('~marker_id', 'marker_id0')
marker_filtered_id = rospy.get_param(
'~marker_filtered_id', 'marker_id0_filtered')
rate_value = rospy.get_param('~rate_value', 125)
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
br = tf2_ros.TransformBroadcaster()
marker_pose = None
marker_pose0 = None
rate = rospy.Rate(rate_value)
while not rospy.is_shutdown():
marker_pose0 = marker_pose
# Lookup the transform
try:
marker_pose_new = tfBuffer.lookup_transform(
parent_frame_id, marker_id, rospy.Time())
if not marker_pose_new is None:
marker_pose = marker_pose_new
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
rospy.logwarn(e)
if marker_pose is None:
rate.sleep()
continue
# Apply running average filter to translation and rotation
if not marker_pose0 is None:
rotation0 = quaternion_to_numpy(marker_pose0.transform.rotation)
rotation = quaternion_to_numpy(marker_pose.transform.rotation)
rotation_interpolated = quaternion_slerp(
rotation0, rotation, 1 - alpha)
translation0 = translation_to_numpy(
marker_pose0.transform.translation)
translation = translation_to_numpy(
marker_pose.transform.translation)
translation = alpha * translation0 + (1 - alpha) * translation
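            # Worked example of the smoothing step above (illustrative, not part of
            # the original script): with the default alpha of 0.9 a new measurement
            # contributes only 10%, e.g. 0.9 * 1.00 + (1 - 0.9) * 2.00 = 1.10, so the
            # filtered pose drifts toward new detections instead of jumping to them.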
# Update pose of the marker
marker_pose.transform.rotation.x = rotation_interpolated[0]
marker_pose.transform.rotation.y = rotation_interpolated[1]
marker_pose.transform.rotation.z = rotation_interpolated[2]
marker_pose.transform.rotation.w = rotation_interpolated[3]
marker_pose.transform.translation.x = translation[0]
marker_pose.transform.translation.y = translation[1]
marker_pose.transform.translation.z = translation[2]
# Create new transform and broadcast it
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = parent_frame_id
t.child_frame_id = marker_filtered_id
t.transform = marker_pose.transform
br.sendTransform(t)
rate.sleep()
| [((11, 11, 11, 36), 'numpy.array', 'np.array', ({(11, 20, 11, 35): '[t.x, t.y, t.z]'}, {}), '([t.x, t.y, t.z])', True, 'import numpy as np\n'), ((15, 11, 15, 41), 'numpy.array', 'np.array', ({(15, 20, 15, 40): '[q.x, q.y, q.z, q.w]'}, {}), '([q.x, q.y, q.z, q.w])', True, 'import numpy as np\n'), ((19, 4, 19, 36), 'rospy.init_node', 'rospy.init_node', ({(19, 20, 19, 35): '"""marker_filter"""'}, {}), "('marker_filter')", False, 'import rospy\n'), ((20, 12, 20, 42), 'rospy.get_param', 'rospy.get_param', ({(20, 28, 20, 36): '"""~alpha"""', (20, 38, 20, 41): '0.9'}, {}), "('~alpha', 0.9)", False, 'import rospy\n'), ((21, 22, 21, 73), 'rospy.get_param', 'rospy.get_param', ({(21, 38, 21, 56): '"""~parent_frame_id"""', (21, 58, 21, 72): '"""kinect2_link"""'}, {}), "('~parent_frame_id', 'kinect2_link')", False, 'import rospy\n'), ((22, 16, 22, 59), 'rospy.get_param', 'rospy.get_param', ({(22, 32, 22, 44): '"""~marker_id"""', (22, 46, 22, 58): '"""marker_id0"""'}, {}), "('~marker_id', 'marker_id0')", False, 'import rospy\n'), ((23, 25, 24, 53), 'rospy.get_param', 'rospy.get_param', ({(24, 8, 24, 29): '"""~marker_filtered_id"""', (24, 31, 24, 52): '"""marker_id0_filtered"""'}, {}), "('~marker_filtered_id', 'marker_id0_filtered')", False, 'import rospy\n'), ((25, 17, 25, 52), 'rospy.get_param', 'rospy.get_param', ({(25, 33, 25, 46): '"""~rate_value"""', (25, 48, 25, 51): '125'}, {}), "('~rate_value', 125)", False, 'import rospy\n'), ((27, 15, 27, 31), 'tf2_ros.Buffer', 'tf2_ros.Buffer', ({}, {}), '()', False, 'import tf2_ros\n'), ((28, 15, 28, 50), 'tf2_ros.TransformListener', 'tf2_ros.TransformListener', ({(28, 41, 28, 49): 'tfBuffer'}, {}), '(tfBuffer)', False, 'import tf2_ros\n'), ((29, 9, 29, 39), 'tf2_ros.TransformBroadcaster', 'tf2_ros.TransformBroadcaster', ({}, {}), '()', False, 'import tf2_ros\n'), ((33, 11, 33, 33), 'rospy.Rate', 'rospy.Rate', ({(33, 22, 33, 32): 'rate_value'}, {}), '(rate_value)', False, 'import rospy\n'), ((34, 14, 34, 33), 'rospy.is_shutdown', 'rospy.is_shutdown', ({}, {}), '()', False, 'import rospy\n'), ((74, 25, 74, 41), 'rospy.Time.now', 'rospy.Time.now', ({}, {}), '()', False, 'import rospy\n'), ((54, 36, 55, 47), 'tf.transformations.quaternion_slerp', 'quaternion_slerp', ({(55, 16, 55, 25): 'rotation0', (55, 27, 55, 35): 'rotation', (55, 37, 55, 46): '1 - alpha'}, {}), '(rotation0, rotation, 1 - alpha)', False, 'from tf.transformations import quaternion_slerp\n'), ((39, 44, 39, 56), 'rospy.Time', 'rospy.Time', ({}, {}), '()', False, 'import rospy\n'), ((44, 12, 44, 28), 'rospy.logwarn', 'rospy.logwarn', ({(44, 26, 44, 27): 'e'}, {}), '(e)', False, 'import rospy\n')] |
hankyul2/FaceDA | src/backbone/utils.py | 73006327df3668923d4206f81d4976ca1240329d | import os
import subprocess
from pathlib import Path
from torch.hub import load_state_dict_from_url
import numpy as np
model_urls = {
# ResNet
'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
# MobileNetV2
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
# Se ResNet
'seresnet18': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth',
'seresnet34': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth',
'seresnet50': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth',
'seresnet101': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth',
'seresnet152': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth',
'seresnext50_32x4d': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
# ViT
'vit_base_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_16.npz',
'vit_base_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_32.npz',
'vit_large_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_16.npz',
'vit_large_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_32.npz',
# Hybrid (resnet50 + ViT)
'r50_vit_base_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/R50+ViT-B_16.npz',
'r50_vit_large_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/R50+ViT-L_32.npz',
}
def load_from_zoo(model, model_name, pretrained_path='pretrained/official'):
model_name = change_384_224(model_name)
Path(os.path.join(pretrained_path, model_name)).mkdir(parents=True, exist_ok=True)
if model_urls[model_name].endswith('pth'):
state_dict = load_state_dict_from_url(url=model_urls[model_name],
model_dir=os.path.join(pretrained_path, model_name),
progress=True, map_location='cpu')
state_dict.pop('fc.weight', None)
state_dict.pop('fc.bias', None)
state_dict.pop('classifier.weight', None)
state_dict.pop('classifier.bias', None)
model.load_state_dict(state_dict, strict=False)
elif model_urls[model_name].endswith('npz'):
npz = load_npz_from_url(url=model_urls[model_name],
file_name=os.path.join(pretrained_path, model_name, os.path.basename(model_urls[model_name])))
model.load_npz(npz)
def change_384_224(model_name):
model_name = model_name.replace('384', '224')
return model_name
def load_npz_from_url(url, file_name):
if not Path(file_name).exists():
subprocess.run(["wget", "-r", "-nc", '-O', file_name, url])
return np.load(file_name)
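if __name__ == '__main__':
    # Minimal sketch (illustrative, not part of the original module): 384-pixel
    # model names are remapped to the 224-pixel checkpoints before the URL
    # lookup in model_urls above.
    assert change_384_224('vit_base_patch16_384') == 'vit_base_patch16_224'
    print(model_urls['vit_base_patch16_224'])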
| [((72, 11, 72, 29), 'numpy.load', 'np.load', ({(72, 19, 72, 28): 'file_name'}, {}), '(file_name)', True, 'import numpy as np\n'), ((71, 8, 71, 67), 'subprocess.run', 'subprocess.run', ({(71, 23, 71, 66): "['wget', '-r', '-nc', '-O', file_name, url]"}, {}), "(['wget', '-r', '-nc', '-O', file_name, url])", False, 'import subprocess\n'), ((47, 9, 47, 50), 'os.path.join', 'os.path.join', ({(47, 22, 47, 37): 'pretrained_path', (47, 39, 47, 49): 'model_name'}, {}), '(pretrained_path, model_name)', False, 'import os\n'), ((50, 56, 50, 97), 'os.path.join', 'os.path.join', ({(50, 69, 50, 84): 'pretrained_path', (50, 86, 50, 96): 'model_name'}, {}), '(pretrained_path, model_name)', False, 'import os\n'), ((70, 11, 70, 26), 'pathlib.Path', 'Path', ({(70, 16, 70, 25): 'file_name'}, {}), '(file_name)', False, 'from pathlib import Path\n'), ((60, 84, 60, 124), 'os.path.basename', 'os.path.basename', ({(60, 101, 60, 123): 'model_urls[model_name]'}, {}), '(model_urls[model_name])', False, 'import os\n')] |
pjha1994/Scrape_reddit | crawler1.py | 2a00a83854085e09f0cf53aef81969025876039b | import requests
from bs4 import BeautifulSoup
def recursiveUrl(url, link, depth):
if depth == 5:
return url
else:
print(link['href'])
page = requests.get(url + link['href'])
soup = BeautifulSoup(page.text, 'html.parser')
        newlink = soup.find('a')
        if newlink is None:  # no further link found on this page
            return link
else:
return link, recursiveUrl(url, newlink, depth + 1)
def getLinks(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
links = soup.find_all('a')
    # Iterate over a snapshot: appending to the list while iterating it directly
    # would make this loop run forever.
    for link in list(links):
        links.append(recursiveUrl(url, link, 0))
return links
links = getLinks("http://www.reddit.com/")
print(links) | [((18, 11, 18, 28), 'requests.get', 'requests.get', ({(18, 24, 18, 27): 'url'}, {}), '(url)', False, 'import requests\n'), ((19, 11, 19, 50), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(19, 25, 19, 34): 'page.text', (19, 36, 19, 49): '"""html.parser"""'}, {}), "(page.text, 'html.parser')", False, 'from bs4 import BeautifulSoup\n'), ((9, 15, 9, 47), 'requests.get', 'requests.get', ({(9, 28, 9, 46): "url + link['href']"}, {}), "(url + link['href'])", False, 'import requests\n'), ((10, 15, 10, 54), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(10, 29, 10, 38): 'page.text', (10, 40, 10, 53): '"""html.parser"""'}, {}), "(page.text, 'html.parser')", False, 'from bs4 import BeautifulSoup\n')] |
BrianThomasRoss/CHIME-2 | chime2/tests/normal/models/seir_test.py | f084ab552fac5e50841a922293b74d653450790b | """Tests for SEIR model in this repo
* Compares conserved quantities
* Compares model against SEIR wo social policies in limit to SIR
"""
from pandas import Series
from pandas.testing import assert_frame_equal, assert_series_equal
from bayes_chime.normal.models import SEIRModel, SIRModel
from pytest import fixture
from tests.normal.models.sir_test import ( # pylint: disable=W0611
fixture_penn_chime_raw_df_no_policy,
fixture_penn_chime_setup,
fixture_sir_data_wo_policy,
)
COLS_TO_COMPARE = [
"susceptible",
"infected",
"recovered",
# Does not compare census as this repo uses the exponential distribution
]
PENN_CHIME_COMMIT = "188c35be9561164bedded4a8071a320cbde0d2bc"
@fixture(name="seir_data")
def fixture_seir_data(sir_data_wo_policy):
"""Returns data for the SIHR model
"""
x, p = sir_data_wo_policy
pp = p.copy()
xx = x.copy()
pp["alpha"] = 0.5
pp["nu"] = 1
pp["initial_exposed"] = 0
return xx, pp
def test_conserved_n(seir_data):
"""Checks if S + E + I + R is conserved for SEIR
"""
x, pars = seir_data
n_total = 0
for key in SEIRModel.compartments:
n_total += pars[f"initial_{key}"]
seir_model = SEIRModel()
predictions = seir_model.propagate_uncertainties(x, pars)
n_computed = predictions[SEIRModel.compartments].sum(axis=1)
n_expected = Series(data=[n_total] * len(n_computed), index=n_computed.index)
assert_series_equal(n_expected, n_computed)
def test_compare_sir_vs_seir(sir_data_wo_policy, seir_data, monkeypatch):
"""Checks if SEIR and SIR return same results if the code enforces
* alpha = gamma
* E = 0
* dI = dE
"""
x_sir, pars_sir = sir_data_wo_policy
x_seir, pars_seir = seir_data
pars_seir["alpha"] = pars_sir["gamma"] # will be done by hand
def mocked_seir_step(data, **pars):
data["exposed"] = 0
new_data = SEIRModel.simulation_step(data, **pars)
new_data["infected"] += new_data["exposed_new"]
return new_data
seir_model = SEIRModel()
monkeypatch.setattr(seir_model, "simulation_step", mocked_seir_step)
sir_model = SIRModel()
predictions_sir = sir_model.propagate_uncertainties(x_sir, pars_sir)
predictions_seir = seir_model.propagate_uncertainties(x_seir, pars_seir)
assert_frame_equal(
predictions_sir[COLS_TO_COMPARE], predictions_seir[COLS_TO_COMPARE],
)
| [((26, 1, 26, 26), 'pytest.fixture', 'fixture', (), '', False, 'from pytest import fixture\n'), ((49, 17, 49, 28), 'bayes_chime.normal.models.SEIRModel', 'SEIRModel', ({}, {}), '()', False, 'from bayes_chime.normal.models import SEIRModel, SIRModel\n'), ((55, 4, 55, 47), 'pandas.testing.assert_series_equal', 'assert_series_equal', ({(55, 24, 55, 34): 'n_expected', (55, 36, 55, 46): 'n_computed'}, {}), '(n_expected, n_computed)', False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((76, 17, 76, 28), 'bayes_chime.normal.models.SEIRModel', 'SEIRModel', ({}, {}), '()', False, 'from bayes_chime.normal.models import SEIRModel, SIRModel\n'), ((79, 16, 79, 26), 'bayes_chime.normal.models.SIRModel', 'SIRModel', ({}, {}), '()', False, 'from bayes_chime.normal.models import SEIRModel, SIRModel\n'), ((83, 4, 85, 5), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', ({(84, 8, 84, 40): 'predictions_sir[COLS_TO_COMPARE]', (84, 42, 84, 75): 'predictions_seir[COLS_TO_COMPARE]'}, {}), '(predictions_sir[COLS_TO_COMPARE], predictions_seir[\n COLS_TO_COMPARE])', False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((72, 19, 72, 58), 'bayes_chime.normal.models.SEIRModel.simulation_step', 'SEIRModel.simulation_step', ({(72, 45, 72, 49): 'data'}, {}), '(data, **pars)', False, 'from bayes_chime.normal.models import SEIRModel, SIRModel\n')] |
mrware91/PhilTransA-TRXS-Limits | Libraries/mattsLibraries/mathOperations.py | 5592c6c66276cd493d10f066aa636aaf600d3a00 | import numpy as np
from scipy.interpolate import interp1d
from pyTools import *
################################################################################
#~~~~~~~~~Log ops
################################################################################
def logPolyVal(p,x):
ord = p.order()
logs = []
for idx in xrange(ord+1):
logs.append( np.log( p[idx] ) + (ord-idx)*np.log(x) )
return logs
################################################################################
#~~~~~~~~~Symmeterize data
################################################################################
def symmeterize( x, y, interp_type='cubic' ):
if x.min() <= 0:
raise ValueError('x.min() must be greater than zero.')
xs = np.array([-x,x]).flatten()
xs.sort()
f = interp1d( x , y , kind=interp_type )
return { 'x':xs , 'y':f(np.abs(xs)) }
################################################################################
#~~~~~~~~~3D Shapes
################################################################################
def makeSphere(x0=0,y0=0,z0=0,r=1,ntheta=30,nphi=30):
u = np.linspace(0, np.pi, ntheta)
v = np.linspace(0, 2 * np.pi, nphi)
x = np.outer(np.sin(u), np.sin(v))*r
y = np.outer(np.sin(u), np.cos(v))*r
z = np.outer(np.cos(u), np.ones_like(v))*r
return x+x0, y+y0, z+z0
def makeCylinder(x0=0,y0=0,z0=0,r=1,h=10,ntheta=30,nz=30):
u = np.linspace(0, 2*np.pi, ntheta)
z = np.linspace(0, h, nz)
UU,ZZ = np.meshgrid(u,z)
XX = np.cos(UU)*r
YY = np.sin(UU)*r
# ax.plot_wireframe(x, y, z)
return XX+x0, YY+y0, ZZ+z0
def generateLine3D( x0=0, x1=1, y0=0, y1=1, z0=0, z1=0, N=2 ):
return {'line':{'xData':np.linspace(x0,x1,N),
'yData':np.linspace(y0,y1,N),
'zData':np.linspace(z0,z1,N),
'cData':np.ones((N,1))}}
################################################################################
#~~~~~~~~~2D Shapes
################################################################################
def generateCircle(R=1, X0=0, Y0=0, N = 60, thetaMin = 0, thetaMax = 2*np.pi ):
thetas = np.linspace( thetaMin , thetaMax , N)
uY = np.sin( thetas )*R
uX = np.cos( thetas )*R
return {'circle':{'xData':uX+X0, 'yData':uY+Y0}}
def generateEllipse( RX=2, RY=1, X0=0, Y0=0, N = 60, thetaMin = 0, thetaMax = 2*np.pi ):
thetas = np.linspace( thetaMin , thetaMax , N)
uY = np.sin( thetas )*RY
uX = np.cos( thetas )*RX
return {'ellipse':{'xData':uX+X0, 'yData':uY+Y0}}
def makeCylinder2D( L = 10., R = 1., N=60, view_degrees=30. ):
yFac = np.cos(view_degrees * np.pi/180.)
zFac = np.sin(view_degrees * np.pi/180.)
xL = np.ones((2,1))*-R
xR = -xL
y = np.array([0,L])*yFac
cylinder = { 'leftSide':{'xData':xL, 'yData':y},
'rightSide':{'xData':xR, 'yData':y},
'upperEllipse':generateEllipse(RX = R, RY=R*zFac, Y0=L*yFac,N=N)['ellipse'],
'lowerHalfEllipse':generateEllipse(RX = R, RY=R*zFac, thetaMin=np.pi, thetaMax=2*np.pi, N=int(N/2.))['ellipse']}
return cylinder
################################################################################
#~~~~~~~~~Rotations
################################################################################
def rotateObject(x,y,z,ax=None,ay=None,az=None):
if ax is not None:
y,z = rotateAt(y,z,ax)
if ay is not None:
x,z = rotateAt(x,z,-ay)
if az is not None:
x,y = rotateAt(x,y,az)
return x,y,z
def rotateAt(x,y,a):
xp = np.cos(a)*x-np.sin(a)*y
yp = np.cos(a)*y+np.sin(a)*x
return xp, yp
def rotateObj2D( obj_in, degrees ):
obj = obj_in.copy()
keys = obj.keys()
for key in keys:
obj[key] = rotate2D( degrees=degrees, **obj[key] )
return obj
def rotate2D( xData, yData, degrees ):
x = xData.flatten()
y = yData.flatten()
z = np.zeros_like(x)
x,y,z = rotateObject( x, y, z, az=float(degrees)/180.*np.pi )
return {'xData':x, 'yData':y}
def rotateObj3D( obj_in, gamma, theta, phi ):
obj = obj_in.copy()
keys = obj.keys()
for key in keys:
obj[key] = rotate3D( gamma=gamma, theta=theta, phi=phi, **obj[key] )
return obj
def rotate3D( xData, yData, zData, gamma, theta, phi, kwargs_toggle=True, **kwargs ):
ignore_kwargs(kwargs, toggle=kwargs_toggle)
x = xData.flatten()
y = yData.flatten()
z = zData.flatten()
x,y,z = rotateObject( x, y, z, az=float(gamma)/180.*np.pi )
x,y,z = rotateObject( x, y, z, ay=float(theta)/180.*np.pi )
x,y,z = rotateObject( x, y, z, az=float(phi)/180.*np.pi )
return {'xData':x, 'yData':y, 'zData':z}
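if __name__ == '__main__':
    # Minimal sketch (illustrative, not part of the original module; assumes the
    # module's own pyTools import resolves): rotating the point (1, 0) by 90
    # degrees about z should land on (0, 1) up to rounding.
    _rot = rotate2D(np.array([1.0]), np.array([0.0]), 90)
    print(np.round(_rot['xData'], 6), np.round(_rot['yData'], 6))  # [0.] [1.]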
| [((25, 8, 25, 44), 'scipy.interpolate.interp1d', 'interp1d', (), '', False, 'from scipy.interpolate import interp1d\n'), ((33, 8, 33, 37), 'numpy.linspace', 'np.linspace', ({(33, 20, 33, 21): '0', (33, 23, 33, 28): 'np.pi', (33, 30, 33, 36): 'ntheta'}, {}), '(0, np.pi, ntheta)', True, 'import numpy as np\n'), ((34, 8, 34, 39), 'numpy.linspace', 'np.linspace', ({(34, 20, 34, 21): '0', (34, 23, 34, 32): '2 * np.pi', (34, 34, 34, 38): 'nphi'}, {}), '(0, 2 * np.pi, nphi)', True, 'import numpy as np\n'), ((42, 8, 42, 39), 'numpy.linspace', 'np.linspace', ({(42, 20, 42, 21): '0', (42, 23, 42, 30): '2 * np.pi', (42, 32, 42, 38): 'ntheta'}, {}), '(0, 2 * np.pi, ntheta)', True, 'import numpy as np\n'), ((43, 9, 43, 30), 'numpy.linspace', 'np.linspace', ({(43, 21, 43, 22): '0', (43, 24, 43, 25): 'h', (43, 27, 43, 29): 'nz'}, {}), '(0, h, nz)', True, 'import numpy as np\n'), ((45, 12, 45, 28), 'numpy.meshgrid', 'np.meshgrid', ({(45, 24, 45, 25): 'u', (45, 26, 45, 27): 'z'}, {}), '(u, z)', True, 'import numpy as np\n'), ((63, 13, 63, 50), 'numpy.linspace', 'np.linspace', ({(63, 26, 63, 34): 'thetaMin', (63, 37, 63, 45): 'thetaMax', (63, 48, 63, 49): 'N'}, {}), '(thetaMin, thetaMax, N)', True, 'import numpy as np\n'), ((69, 13, 69, 50), 'numpy.linspace', 'np.linspace', ({(69, 26, 69, 34): 'thetaMin', (69, 37, 69, 45): 'thetaMax', (69, 48, 69, 49): 'N'}, {}), '(thetaMin, thetaMax, N)', True, 'import numpy as np\n'), ((76, 11, 76, 44), 'numpy.cos', 'np.cos', ({(76, 18, 76, 43): 'view_degrees * np.pi / 180.0'}, {}), '(view_degrees * np.pi / 180.0)', True, 'import numpy as np\n'), ((77, 11, 77, 44), 'numpy.sin', 'np.sin', ({(77, 18, 77, 43): 'view_degrees * np.pi / 180.0'}, {}), '(view_degrees * np.pi / 180.0)', True, 'import numpy as np\n'), ((118, 8, 118, 24), 'numpy.zeros_like', 'np.zeros_like', ({(118, 22, 118, 23): 'x'}, {}), '(x)', True, 'import numpy as np\n'), ((47, 9, 47, 19), 'numpy.cos', 'np.cos', ({(47, 16, 47, 18): 'UU'}, {}), '(UU)', True, 'import numpy as np\n'), ((48, 9, 48, 19), 'numpy.sin', 'np.sin', ({(48, 16, 48, 18): 'UU'}, {}), '(UU)', True, 'import numpy as np\n'), ((64, 9, 64, 25), 'numpy.sin', 'np.sin', ({(64, 17, 64, 23): 'thetas'}, {}), '(thetas)', True, 'import numpy as np\n'), ((65, 9, 65, 25), 'numpy.cos', 'np.cos', ({(65, 17, 65, 23): 'thetas'}, {}), '(thetas)', True, 'import numpy as np\n'), ((70, 9, 70, 25), 'numpy.sin', 'np.sin', ({(70, 17, 70, 23): 'thetas'}, {}), '(thetas)', True, 'import numpy as np\n'), ((71, 9, 71, 25), 'numpy.cos', 'np.cos', ({(71, 17, 71, 23): 'thetas'}, {}), '(thetas)', True, 'import numpy as np\n'), ((79, 9, 79, 23), 'numpy.ones', 'np.ones', ({(79, 17, 79, 22): '(2, 1)'}, {}), '((2, 1))', True, 'import numpy as np\n'), ((81, 9, 81, 24), 'numpy.array', 'np.array', ({(81, 18, 81, 23): '[0, L]'}, {}), '([0, L])', True, 'import numpy as np\n'), ((22, 9, 22, 25), 'numpy.array', 'np.array', ({(22, 18, 22, 24): '[-x, x]'}, {}), '([-x, x])', True, 'import numpy as np\n'), ((27, 28, 27, 38), 'numpy.abs', 'np.abs', ({(27, 35, 27, 37): 'xs'}, {}), '(xs)', True, 'import numpy as np\n'), ((36, 17, 36, 26), 'numpy.sin', 'np.sin', ({(36, 24, 36, 25): 'u'}, {}), '(u)', True, 'import numpy as np\n'), ((36, 28, 36, 37), 'numpy.sin', 'np.sin', ({(36, 35, 36, 36): 'v'}, {}), '(v)', True, 'import numpy as np\n'), ((37, 17, 37, 26), 'numpy.sin', 'np.sin', ({(37, 24, 37, 25): 'u'}, {}), '(u)', True, 'import numpy as np\n'), ((37, 28, 37, 37), 'numpy.cos', 'np.cos', ({(37, 35, 37, 36): 'v'}, {}), '(v)', True, 'import numpy as np\n'), ((38, 17, 38, 26), 'numpy.cos', 
'np.cos', ({(38, 24, 38, 25): 'u'}, {}), '(u)', True, 'import numpy as np\n'), ((38, 28, 38, 43), 'numpy.ones_like', 'np.ones_like', ({(38, 41, 38, 42): 'v'}, {}), '(v)', True, 'import numpy as np\n'), ((54, 28, 54, 48), 'numpy.linspace', 'np.linspace', ({(54, 40, 54, 42): 'x0', (54, 43, 54, 45): 'x1', (54, 46, 54, 47): 'N'}, {}), '(x0, x1, N)', True, 'import numpy as np\n'), ((55, 20, 55, 40), 'numpy.linspace', 'np.linspace', ({(55, 32, 55, 34): 'y0', (55, 35, 55, 37): 'y1', (55, 38, 55, 39): 'N'}, {}), '(y0, y1, N)', True, 'import numpy as np\n'), ((56, 20, 56, 40), 'numpy.linspace', 'np.linspace', ({(56, 32, 56, 34): 'z0', (56, 35, 56, 37): 'z1', (56, 38, 56, 39): 'N'}, {}), '(z0, z1, N)', True, 'import numpy as np\n'), ((57, 20, 57, 34), 'numpy.ones', 'np.ones', ({(57, 28, 57, 33): '(N, 1)'}, {}), '((N, 1))', True, 'import numpy as np\n'), ((102, 9, 102, 18), 'numpy.cos', 'np.cos', ({(102, 16, 102, 17): 'a'}, {}), '(a)', True, 'import numpy as np\n'), ((102, 21, 102, 30), 'numpy.sin', 'np.sin', ({(102, 28, 102, 29): 'a'}, {}), '(a)', True, 'import numpy as np\n'), ((103, 9, 103, 18), 'numpy.cos', 'np.cos', ({(103, 16, 103, 17): 'a'}, {}), '(a)', True, 'import numpy as np\n'), ((103, 21, 103, 30), 'numpy.sin', 'np.sin', ({(103, 28, 103, 29): 'a'}, {}), '(a)', True, 'import numpy as np\n'), ((12, 21, 12, 37), 'numpy.log', 'np.log', ({(12, 29, 12, 35): 'p[idx]'}, {}), '(p[idx])', True, 'import numpy as np\n'), ((12, 50, 12, 59), 'numpy.log', 'np.log', ({(12, 57, 12, 58): 'x'}, {}), '(x)', True, 'import numpy as np\n')] |
avryhof/ambient_api | setup.py | 08194b5d8626801f2c2c7369adacb15eace54802 | from setuptools import setup
setup(
name="ambient_api",
version="1.5.6",
packages=["ambient_api"],
url="https://github.com/avryhof/ambient_api",
license="MIT",
author="Amos Vryhof",
author_email="[email protected]",
description="A Python class for accessing the Ambient Weather API.",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
install_requires=["requests", "urllib3"],
)
| [((3, 0, 19, 1), 'setuptools.setup', 'setup', (), '', False, 'from setuptools import setup\n')] |
ganeshutah/FPChecker | tests/llvm/static/test_main_is_found/test_main_is_found.py | 53a471429762ace13f69733cb2f8b7227fc15b9f | #!/usr/bin/env python
import subprocess
import os
def setup_module(module):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(THIS_DIR)
def teardown_module(module):
cmd = ["make clean"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
def test_1():
cmd = ["make"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
passed = False
for l in cmdOutput.decode('utf-8').split("\n"):
if "#FPCHECKER: main() found" in l:
passed = True
assert passed == True
| [((8, 4, 8, 22), 'os.chdir', 'os.chdir', ({(8, 13, 8, 21): 'THIS_DIR'}, {}), '(THIS_DIR)', False, 'import os\n'), ((12, 16, 12, 82), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((17, 16, 17, 82), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n'), ((7, 31, 7, 56), 'os.path.abspath', 'os.path.abspath', ({(7, 47, 7, 55): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
kamnon/regipy | regipy/exceptions.py | 12d3be9da631dcc0d6fb342767e51ec4799141c6 | class RegipyException(Exception):
"""
This is the parent exception for all regipy exceptions
"""
pass
class RegipyGeneralException(RegipyException):
"""
General exception
"""
pass
class RegistryValueNotFoundException(RegipyException):
pass
class NoRegistrySubkeysException(RegipyException):
pass
class NoRegistryValuesException(RegipyException):
pass
class RegistryKeyNotFoundException(RegipyException):
pass
class UnidentifiedHiveException(RegipyException):
pass
class RegistryRecoveryException(RegipyException):
pass
class RegistryParsingException(RegipyException):
"""
Raised when there is a parsing error, most probably a corrupted hive
"""
pass
class NtSidDecodingException(RegipyException):
"""
Raised when the binary Windows NT SID representation can not be decoded
"""
| [] |
Zhenye-Na/LxxxCode | Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | from collections import deque
class Solution:
"""
@param n: a positive integer
@return: the minimum number of replacements
"""
def integerReplacement(self, n):
# Write your code here
steps = 0
if n == 1:
return steps
queue = deque([n])
while queue:
size = len(queue)
print(queue, steps)
for _ in range(size):
num = queue.popleft()
if num == 1:
return steps
if num % 2 == 0:
queue.append(num // 2)
else:
queue.append(num + 1)
queue.append(num - 1)
steps += 1
return 0
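# Illustrative check (not part of the original solution): 8 -> 4 -> 2 -> 1, so
# the BFS above needs three replacement steps.
if __name__ == '__main__':
    print(Solution().integerReplacement(8))  # 3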
| [((16, 16, 16, 26), 'collections.deque', 'deque', ({(16, 22, 16, 25): '[n]'}, {}), '([n])', False, 'from collections import deque\n')] |
enflo/weather-flask | src/routes/web.py | c4d905e1f557b4c9b39d0a578fdbb6fefc839028 | from flask import Blueprint, render_template
from gateways.models import getWeatherData
web = Blueprint("web", __name__, template_folder='templates')
@web.route("/", methods=['GET'])
def home():
items = getWeatherData.get_last_item()
cityName = items["city"]
return render_template("index.html",
city=cityName[0],
temperature=items["temperature"],
humidity=items["humidity"],
pressure=items["pressure"])
#@web.route("/profile", methods=['GET'])
#def profile():
# items = getWeatherData.get_last_item()
# return render_template("profile.html",
# celcius=items["temperature"],
# humidity=items["humidity"],
# pressure=items["pressure"])
#@web.route("/about", methods=['GET'])
#def about():
# return render_template("about.html")
| [((5, 6, 5, 61), 'flask.Blueprint', 'Blueprint', (), '', False, 'from flask import Blueprint, render_template\n'), ((9, 12, 9, 42), 'gateways.models.getWeatherData.get_last_item', 'getWeatherData.get_last_item', ({}, {}), '()', False, 'from gateways.models import getWeatherData\n'), ((11, 11, 15, 55), 'flask.render_template', 'render_template', (), '', False, 'from flask import Blueprint, render_template\n')] |
bowlofstew/changes | changes/buildsteps/lxc.py | ebd393520e0fdb07c240a8d4e8747281b6186e28 | from __future__ import absolute_import
from changes.buildsteps.default import DefaultBuildStep
class LXCBuildStep(DefaultBuildStep):
"""
Similar to the default build step, except that it runs the client using
the LXC adapter.
"""
def can_snapshot(self):
return True
def get_label(self):
return 'Build via Changes Client (LXC)'
def get_client_adapter(self):
return 'lxc'
def get_allocation_params(self, jobstep):
params = super(LXCBuildStep, self).get_allocation_params(jobstep)
params['memory'] = str(self.resources['mem'])
params['cpus'] = str(self.resources['cpus'])
return params
| [] |
anvytran-dev/mycode | swapidemo1.py | 3753c19828f0ecc506a6450bb6b71b4a5d651e5f | #!/usr/bin/env python3
"""Star Wars API HTTP response parsing"""
# requests is used to send HTTP requests (get it?)
import requests
URL= "https://swapi.dev/api/people/1"
def main():
"""sending GET request, checking response"""
# SWAPI response is stored in "resp" object
resp= requests.get(URL)
# what kind of python object is "resp"?
print("This object class is:", type(resp), "\n")
# what can we do with it?
print("Methods/Attributes include:", dir(resp))
if __name__ == "__main__":
main()
| [((13, 10, 13, 27), 'requests.get', 'requests.get', ({(13, 23, 13, 26): 'URL'}, {}), '(URL)', False, 'import requests\n')] |
Deltares/NBSDynamics | src/biota_models/vegetation/model/constants_json_create.py | 4710da529d85b588ea249f6e2b4f4cac132bb34f | import json
schema = {
"Spartina": {
"ColStart": "2000-04-01",
"ColEnd": "2000-05-31",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.05,
"initial shoot length": 0.015,
"initial diameter": 0.003,
"start growth period": "2000-04-01",
"end growth period": "2000-10-31",
"start winter period": "2000-11-30",
"maximum plant height": [0.8, 1.3],
"maximum diameter": [0.003, 0.005],
"maximum root length": [0.2, 1],
"maximum years in LifeStage": [1, 19],
"numStem": [700, 700], # 3.5. number of stems per m2
"iniCol_frac": 0.6, # 3.6. initial colonization fraction (0-1)
"Cd": [1.1, 1.15], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.4, 0.4], # 3.11. flooding mortality threshold
"floMort_slope": [0.25, 0.25], # 3.12. flooding mortality slope
"vel_thres": [0.15, 0.25], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.4, 0.4], # 3.15 max height during winter time
},
"Salicornia": {
"ColStart": "2000-02-15",
"ColEnd": "2000-04-30",
"random": 20,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 1,
"Number LifeStages": 1,
"initial root length": 0.15,
"initial shoot length": 0.05,
"initial diameter": 0.01,
"start growth period": "2000-02-15",
"end growth period": "2000-10-15",
"start winter period": "2000-11-01",
"maximum plant height": [0.4, 0],
"maximum diameter": [0.015, 0],
"maximum root length": [0.05, 0],
"maximum years in LifeStage": [1, 0],
"numStem": [190, 0], # 3.5. number of stems per m2
"iniCol_frac": 0.2, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0], # 3.7. drag coefficient
"desMort_thres": [400, 1], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 1], # 3.10. dessication mortality slope
"floMort_thres": [0.5, 1], # 3.11. flooding mortality threshold
"floMort_slope": [0.12, 1], # 3.12. flooding mortality slope
"vel_thres": [0.15, 1], # 3.13. flow velocity threshold
"vel_slope": [3, 1], # 3.14. flow velocity slope
"maxH_winter": [0.0, 0.0], # 3.15 max height during winter time
},
"Puccinellia": {
"ColStart": "2000-03-01",
"ColEnd": "2000-04-30",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.02,
"initial shoot length": 0.05,
"initial diameter": 0.004,
"start growth period": "2000-03-01",
"end growth period": "2000-11-15",
"start winter period": "2000-11-30",
"maximum plant height": [0.2, 0.35],
"maximum diameter": [0.004, 0.005],
"maximum root length": [0.15, 0.15],
"maximum years in LifeStage": [1, 19],
"numStem": [6500, 6500], # 3.5. number of stems per m2
"iniCol_frac": 0.3, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0.7], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.35, 0.35], # 3.11. flooding mortality threshold
"floMort_slope": [0.4, 0.4], # 3.12. flooding mortality slope
"vel_thres": [0.25, 0.5], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.2, 0.2], # 3.15 max height during winter time
},
}
with open("constants_veg.json", "w") as write_file:
json.dump(schema, write_file, indent=4)
| [((94, 4, 94, 43), 'json.dump', 'json.dump', (), '', False, 'import json\n')] |
harshad-deo/TorchVI | format/format.bzl | f66d1486201368c9906869477ba7ae254d2e7191 | def _replace_formatted(ctx, manifest, files):
out = ctx.actions.declare_file(ctx.label.name)
# this makes it easier to add variables
file_lines = [
"""#!/bin/bash -e
WORKSPACE_ROOT="${1:-$BUILD_WORKSPACE_DIRECTORY}" """,
"""RUNPATH="${TEST_SRCDIR-$0.runfiles}"/""" + ctx.workspace_name,
"""RUNPATH=(${RUNPATH//bin/ })
RUNPATH="${RUNPATH[0]}"bin
echo $WORKSPACE_ROOT
echo $RUNPATH
while read original formatted; do
if [[ ! -z "$original" ]] && [[ ! -z "$formatted" ]]; then
if ! cmp -s "$WORKSPACE_ROOT/$original" "$RUNPATH/$formatted"; then
echo "Formatting $original"
cp "$RUNPATH/$formatted" "$WORKSPACE_ROOT/$original"
fi
fi
done < "$RUNPATH"/""" + manifest.short_path,
]
file_content = "\n".join(file_lines)
ctx.actions.write(
output = out,
content = file_content,
)
files.append(manifest)
return [DefaultInfo(files = depset(files), executable = out)]
def _build_format_py(ctx):
files = []
manifest_content = []
for src in ctx.files.srcs:
if src.is_source:
file = ctx.actions.declare_file("{}.format.output".format(src.short_path))
files.append(file)
ctx.actions.run(
arguments = [src.path, file.path],
executable = ctx.executable._fmt,
outputs = [file],
inputs = [src, ctx.file._style],
)
manifest_content.append("{} {}".format(src.short_path, file.short_path))
manifest = ctx.actions.declare_file("format/{}/manifest.txt".format(ctx.label.name))
ctx.actions.write(manifest, "\n".join(manifest_content) + "\n")
return manifest, files
def _format_py_impl(ctx):
manifest, files = _build_format_py(ctx)
return _replace_formatted(ctx, manifest, files)
format_py = rule(
implementation = _format_py_impl,
executable = True,
attrs = {
"srcs": attr.label_list(
allow_files = [".py"],
mandatory = True,
),
"_fmt": attr.label(
cfg = "host",
default = "//format:format_py",
executable = True,
),
"_style": attr.label(
allow_single_file = True,
default = ":setup.cfg",
),
},
)
| [] |
GunnerJnr/_CodeInstitute | Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py | efba0984a3dc71558eef97724c85e274a712798c | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser, UserManager
from django.db import models
from django.utils import timezone
# Create your models here.
# Create our new user class
class AccountUserManager(UserManager):
def _create_user(self, username, email, password, is_staff, is_supervisor, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
:param username:
:param email:
:param password:
:param is_staff:
:param is_supervisor:
:param extra_fields:
:return:
"""
now = timezone.now()
if not email:
            raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(username=email, email=email,
is_staff=is_staff, is_active=True,
is_supervisor=is_supervisor,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self.db)
return user
class User(AbstractUser):
# now that we've abstracted this class we can add any
# number of custom attribute to our user class
# in later units we'll be adding things like payment details!
    objects = AccountUserManager()
| [((22, 14, 22, 28), 'django.utils.timezone.now', 'timezone.now', ({}, {}), '()', False, 'from django.utils import timezone\n')] |
mahmoudnafifi/HistoGAN | histoGAN.py | 50be1482638ace3ec85d733e849dec494ede155b | """
If you find this code useful, please cite our paper:
Mahmoud Afifi, Marcus A. Brubaker, and Michael S. Brown. "HistoGAN:
Controlling Colors of GAN-Generated and Real Images via Color Histograms."
In CVPR, 2021.
@inproceedings{afifi2021histogan,
title={Histo{GAN}: Controlling Colors of {GAN}-Generated and Real Images via
Color Histograms},
author={Afifi, Mahmoud and Brubaker, Marcus A. and Brown, Michael S.},
booktitle={CVPR},
year={2021}
}
"""
from tqdm import tqdm
from histoGAN import Trainer, NanException
from histogram_classes.RGBuvHistBlock import RGBuvHistBlock
from datetime import datetime
import torch
import argparse
from retry.api import retry_call
import os
from PIL import Image
from torchvision import transforms
import numpy as np
SCALE = 1 / np.sqrt(2.0)
def train_from_folder(
data='./dataset/',
results_dir='./results',
models_dir='./models',
name='test',
new=False,
load_from=-1,
image_size=128,
network_capacity=16,
transparent=False,
batch_size=2,
gradient_accumulate_every=8,
num_train_steps=150000,
learning_rate=2e-4,
num_workers=None,
save_every=1000,
generate=False,
save_noise_latent=False,
target_noise_file=None,
target_latent_file=None,
num_image_tiles=8,
trunc_psi=0.75,
fp16=False,
fq_layers=[],
fq_dict_size=256,
attn_layers=[],
hist_method='inverse-quadratic',
hist_resizing='sampling',
hist_sigma=0.02,
hist_bin=64,
hist_insz=150,
alpha=2,
target_hist=None,
aug_prob=0.0,
dataset_aug_prob=0.0,
aug_types=None):
model = Trainer(
name,
results_dir,
models_dir,
batch_size=batch_size,
gradient_accumulate_every=gradient_accumulate_every,
image_size=image_size,
network_capacity=network_capacity,
transparent=transparent,
lr=learning_rate,
num_workers=num_workers,
save_every=save_every,
trunc_psi=trunc_psi,
fp16=fp16,
fq_layers=fq_layers,
fq_dict_size=fq_dict_size,
attn_layers=attn_layers,
hist_insz=hist_insz,
hist_bin=hist_bin,
hist_sigma=hist_sigma,
hist_resizing=hist_resizing,
hist_method=hist_method,
aug_prob=aug_prob,
dataset_aug_prob=dataset_aug_prob,
aug_types=aug_types
)
if not new:
model.load(load_from)
else:
model.clear()
if generate:
now = datetime.now()
timestamp = now.strftime("%m-%d-%Y_%H-%M-%S")
if save_noise_latent and not os.path.exists('temp'):
os.mkdir('./temp')
if save_noise_latent and not os.path.exists(f'./temp/{name}'):
os.mkdir(f'./temp/{name}')
if target_hist is None:
raise Exception('No target histogram or image is given')
extension = os.path.splitext(target_hist)[1]
if extension == '.npy':
hist = np.load(target_hist)
h = torch.from_numpy(hist).to(device=torch.cuda.current_device())
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(target_hist)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
elif str.lower(extension) == '.jpg' or str.lower(extension) == '.png':
histblock = RGBuvHistBlock(insz=hist_insz, h=hist_bin,
resizing=hist_resizing, method=hist_method,
sigma=hist_sigma,
device=torch.cuda.current_device())
transform = transforms.Compose([transforms.ToTensor()])
img = Image.open(target_hist)
img = torch.unsqueeze(transform(img), dim=0).to(
device=torch.cuda.current_device())
h = histblock(img)
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(target_hist)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
elif extension == '':
files = [os.path.join(target_hist, f) for f in os.listdir(target_hist) if
os.path.isfile(os.path.join(target_hist, f))]
histblock = RGBuvHistBlock(insz=hist_insz, h=hist_bin,
resizing=hist_resizing, method=hist_method,
sigma=hist_sigma,
device=torch.cuda.current_device())
transform = transforms.Compose([transforms.ToTensor()])
for f in files:
extension = os.path.splitext(f)[1]
if extension == '.npy':
hist = np.load(f)
h = torch.from_numpy(hist).to(device=torch.cuda.current_device())
elif (extension == str.lower(extension) == '.jpg' or str.lower(
extension) == '.png'):
img = Image.open(f)
img = torch.unsqueeze(transform(img), dim=0).to(
device=torch.cuda.current_device())
h = histblock(img)
else:
print(f'Warning: File extension of {f} is not supported.')
continue
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(f)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/'
f'{samples_name}')
else:
print('The file extension of target image is not supported.')
raise NotImplementedError
return
print('\nStart training....\n')
print(f'Alpha = {alpha}')
model.set_data_src(data)
for _ in tqdm(range(num_train_steps - model.steps), mininterval=10.,
desc=f'{name}<{data}>'):
retry_call(model.train, fargs=[alpha], tries=3, exceptions=NanException)
if _ % 50 == 0:
model.print_log()
def get_args():
parser = argparse.ArgumentParser(description='Train/Test HistoGAN.')
parser.add_argument('--data', dest='data', default='./dataset/')
parser.add_argument('--results_dir', dest='results_dir',
default='./results_HistoGAN')
parser.add_argument('--models_dir', dest='models_dir', default='./models')
parser.add_argument('--target_hist', dest='target_hist', default=None)
parser.add_argument('--name', dest='name', default='histoGAN_model')
parser.add_argument('--new', dest='new', default=False)
parser.add_argument('--load_from', dest='load_from', default=-1)
parser.add_argument('--image_size', dest='image_size', default=256, type=int)
parser.add_argument('--network_capacity', dest='network_capacity', default=16,
type=int)
parser.add_argument('--transparent', dest='transparent', default=False)
parser.add_argument('--batch_size', dest='batch_size', default=2, type=int)
parser.add_argument('--gradient_accumulate_every',
dest='gradient_accumulate_every', default=8, type=int)
parser.add_argument('--num_train_steps', dest='num_train_steps',
default=1500000, type=int)
parser.add_argument('--learning_rate', dest='learning_rate', default=2e-4,
type=float)
parser.add_argument('--num_workers', dest='num_workers', default=None)
parser.add_argument('--save_every', dest='save_every', default=5000,
type=int)
parser.add_argument('--generate', dest='generate', default=False)
parser.add_argument('--save_noise_latent', dest='save_n_l', default=False)
parser.add_argument('--target_noise_file', dest='target_n', default=None)
parser.add_argument('--target_latent_file', dest='target_l', default=None)
parser.add_argument('--num_image_tiles', dest='num_image_tiles',
default=16, type=int)
parser.add_argument('--trunc_psi', dest='trunc_psi', default=0.75,
type=float)
  parser.add_argument('--fp16', dest='fp16', default=False)
parser.add_argument('--fq_layers', dest='fq_layers', default=[])
parser.add_argument('--fq_dict_size', dest='fq_dict_size', default=256,
type=int)
parser.add_argument('--attn_layers', dest='attn_layers', default=[])
parser.add_argument('--gpu', dest='gpu', default=0, type=int)
parser.add_argument('--hist_bin', dest='hist_bin', default=64, type=int)
parser.add_argument('--hist_insz', dest='hist_insz', default=150, type=int)
parser.add_argument('--hist_method', dest='hist_method',
default='inverse-quadratic')
parser.add_argument('--hist_resizing', dest='hist_resizing',
default='interpolation')
parser.add_argument('--hist_sigma', dest='hist_sigma', default=0.02,
type=float)
parser.add_argument('--alpha', dest='alpha', default=2, type=float)
parser.add_argument('--aug_prob', dest='aug_prob', default=0.0, type=float,
help='Probability of discriminator augmentation. It '
'applies operations specified in --aug_types.')
parser.add_argument('--dataset_aug_prob', dest='dataset_aug_prob',
default=0.0, type=float,
help='Probability of dataset augmentation. It applies '
'random cropping')
parser.add_argument('--aug_types', dest='aug_types',
default=['translation', 'cutout'], nargs='+',
help='Options include: translation, cutout, and color')
return parser.parse_args()
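# Example invocation (the script name and dataset path are illustrative only):
#   python histoGAN_train.py --data ./dataset/ --name histoGAN_model --batch_size 2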
if __name__ == "__main__":
args = get_args()
torch.cuda.set_device(args.gpu)
train_from_folder(
data=args.data,
results_dir=args.results_dir,
models_dir=args.models_dir,
name=args.name,
new=args.new,
load_from=args.load_from,
image_size=args.image_size,
network_capacity=args.network_capacity,
transparent=args.transparent,
batch_size=args.batch_size,
gradient_accumulate_every=args.gradient_accumulate_every,
num_train_steps=args.num_train_steps,
learning_rate=args.learning_rate,
num_workers=args.num_workers,
save_every=args.save_every,
generate=args.generate,
save_noise_latent=args.save_n_l,
target_noise_file=args.target_n,
target_latent_file=args.target_l,
num_image_tiles=args.num_image_tiles,
trunc_psi=args.trunc_psi,
fp16=args.fp16,
fq_layers=args.fq_layers,
fq_dict_size=args.fq_dict_size,
attn_layers=args.attn_layers,
hist_method=args.hist_method,
hist_resizing=args.hist_resizing,
hist_sigma=args.hist_sigma,
hist_bin=args.hist_bin,
hist_insz=args.hist_insz,
target_hist=args.target_hist,
alpha=args.alpha,
aug_prob=args.aug_prob,
dataset_aug_prob=args.dataset_aug_prob,
aug_types=args.aug_types
)
| [((30, 12, 30, 24), 'numpy.sqrt', 'np.sqrt', ({(30, 20, 30, 23): '(2.0)'}, {}), '(2.0)', True, 'import numpy as np\n'), ((71, 10, 96, 3), 'histoGAN.Trainer', 'Trainer', (), '', False, 'from histoGAN import Trainer, NanException\n'), ((206, 11, 206, 70), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((268, 2, 268, 33), 'torch.cuda.set_device', 'torch.cuda.set_device', ({(268, 24, 268, 32): 'args.gpu'}, {}), '(args.gpu)', False, 'import torch\n'), ((105, 10, 105, 24), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((199, 4, 199, 76), 'retry.api.retry_call', 'retry_call', (), '', False, 'from retry.api import retry_call\n'), ((108, 6, 108, 24), 'os.mkdir', 'os.mkdir', ({(108, 15, 108, 23): '"""./temp"""'}, {}), "('./temp')", False, 'import os\n'), ((110, 6, 110, 32), 'os.mkdir', 'os.mkdir', ({(110, 15, 110, 31): 'f"""./temp/{name}"""'}, {}), "(f'./temp/{name}')", False, 'import os\n'), ((113, 16, 113, 45), 'os.path.splitext', 'os.path.splitext', ({(113, 33, 113, 44): 'target_hist'}, {}), '(target_hist)', False, 'import os\n'), ((115, 13, 115, 33), 'numpy.load', 'np.load', ({(115, 21, 115, 32): 'target_hist'}, {}), '(target_hist)', True, 'import numpy as np\n'), ((107, 33, 107, 55), 'os.path.exists', 'os.path.exists', ({(107, 48, 107, 54): '"""temp"""'}, {}), "('temp')", False, 'import os\n'), ((109, 33, 109, 65), 'os.path.exists', 'os.path.exists', ({(109, 48, 109, 64): 'f"""./temp/{name}"""'}, {}), "(f'./temp/{name}')", False, 'import os\n'), ((136, 12, 136, 35), 'PIL.Image.open', 'Image.open', ({(136, 23, 136, 34): 'target_hist'}, {}), '(target_hist)', False, 'from PIL import Image\n'), ((116, 10, 116, 32), 'torch.from_numpy', 'torch.from_numpy', ({(116, 27, 116, 31): 'hist'}, {}), '(hist)', False, 'import torch\n'), ((116, 43, 116, 70), 'torch.cuda.current_device', 'torch.cuda.current_device', ({}, {}), '()', False, 'import torch\n'), ((120, 14, 120, 38), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((119, 27, 119, 51), 'numpy.log2', 'np.log2', ({(119, 35, 119, 50): 'num_image_tiles'}, {}), '(num_image_tiles)', True, 'import numpy as np\n'), ((134, 40, 134, 67), 'torch.cuda.current_device', 'torch.cuda.current_device', ({}, {}), '()', False, 'import torch\n'), ((135, 38, 135, 59), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import transforms\n'), ((138, 15, 138, 42), 'torch.cuda.current_device', 'torch.cuda.current_device', ({}, {}), '()', False, 'import torch\n'), ((143, 14, 143, 38), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((154, 15, 154, 43), 'os.path.join', 'os.path.join', ({(154, 28, 154, 39): 'target_hist', (154, 41, 154, 42): 'f'}, {}), '(target_hist, f)', False, 'import os\n'), ((142, 27, 142, 51), 'numpy.log2', 'np.log2', ({(142, 35, 142, 50): 'num_image_tiles'}, {}), '(num_image_tiles)', True, 'import numpy as np\n'), ((154, 53, 154, 76), 'os.listdir', 'os.listdir', ({(154, 64, 154, 75): 'target_hist'}, {}), '(target_hist)', False, 'import os\n'), ((159, 40, 159, 67), 'torch.cuda.current_device', 'torch.cuda.current_device', ({}, {}), '()', False, 'import torch\n'), ((160, 38, 160, 59), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import transforms\n'), ((162, 20, 162, 39), 'os.path.splitext', 'os.path.splitext', ({(162, 37, 162, 38): 'f'}, {}), '(f)', False, 'import os\n'), ((164, 17, 164, 27), 'numpy.load', 'np.load', ({(164, 25, 164, 
26): 'f'}, {}), '(f)', True, 'import numpy as np\n'), ((122, 42, 122, 71), 'os.path.splitext', 'os.path.splitext', ({(122, 59, 122, 70): 'target_hist'}, {}), '(target_hist)', False, 'import os\n'), ((155, 30, 155, 58), 'os.path.join', 'os.path.join', ({(155, 43, 155, 54): 'target_hist', (155, 56, 155, 57): 'f'}, {}), '(target_hist, f)', False, 'import os\n'), ((168, 16, 168, 29), 'PIL.Image.open', 'Image.open', ({(168, 27, 168, 28): 'f'}, {}), '(f)', False, 'from PIL import Image\n'), ((178, 16, 178, 40), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((145, 42, 145, 71), 'os.path.splitext', 'os.path.splitext', ({(145, 59, 145, 70): 'target_hist'}, {}), '(target_hist)', False, 'import os\n'), ((165, 14, 165, 36), 'torch.from_numpy', 'torch.from_numpy', ({(165, 31, 165, 35): 'hist'}, {}), '(hist)', False, 'import torch\n'), ((165, 47, 165, 74), 'torch.cuda.current_device', 'torch.cuda.current_device', ({}, {}), '()', False, 'import torch\n'), ((177, 29, 177, 53), 'numpy.log2', 'np.log2', ({(177, 37, 177, 52): 'num_image_tiles'}, {}), '(num_image_tiles)', True, 'import numpy as np\n'), ((170, 19, 170, 46), 'torch.cuda.current_device', 'torch.cuda.current_device', ({}, {}), '()', False, 'import torch\n'), ((180, 44, 180, 63), 'os.path.splitext', 'os.path.splitext', ({(180, 61, 180, 62): 'f'}, {}), '(f)', False, 'import os\n')] |
Kpaubert/onlineweb4 | apps/careeropportunity/migrations/0003_careeropportunity_deadline.py | 9ac79f163bc3a816db57ffa8477ea88770d97807 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-05 18:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("careeropportunity", "0002_careeropportunity_job_type")]
operations = [
migrations.AddField(
model_name="careeropportunity",
name="deadline",
field=models.DateField(blank=True, null=True, verbose_name="søknadsfrist"),
)
]
| [((16, 18, 16, 87), 'django.db.models.DateField', 'models.DateField', (), '', False, 'from django.db import migrations, models\n')] |
grygielski/incubator-mxnet | benchmark/python/ffi/benchmark_ffi.py | 45952e21a35e32a04b7607b121085973369a42db | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import timeit
import itertools
import argparse
import os
class OpArgMngr(object):
"""Operator argument manager for storing operator workloads."""
args = {}
@staticmethod
def add_workload(funcname, *args, **kwargs):
if "_specifier" not in kwargs:
_specifier = funcname
else:
_specifier = kwargs["_specififer"]
del kwargs["_specififer"]
if _specifier in OpArgMngr.args:
raise ValueError("duplicate {}".format(_specifier))
OpArgMngr.args[_specifier] = {'args': args, 'kwargs': kwargs, 'funcname': funcname}
def generate_workloads():
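    # Build a pool of dnp.ones arrays covering every shape with up to three
    # dimensions and each dimension in range(4), keyed like '2x2' or '3x2x1'.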
array_pool = {}
shapes = []
for ndim in range(4):
shapes.extend(list(itertools.product(range(4), repeat=ndim)))
for shape in shapes:
name = 'x'.join(str(i) for i in shape)
if name in array_pool:
raise ValueError("duplicate array {}".format(name))
array_pool[name] = dnp.ones(shape)
return array_pool
def prepare_workloads():
pool = generate_workloads()
OpArgMngr.add_workload("zeros", (2, 2))
OpArgMngr.add_workload("full", (2, 2), 10)
OpArgMngr.add_workload("identity", 3)
OpArgMngr.add_workload("ones", (2, 2))
OpArgMngr.add_workload("einsum", "ii", pool['2x2'], optimize=False)
OpArgMngr.add_workload("unique", pool['1'], return_index=True, return_inverse=True, return_counts=True, axis=-1)
OpArgMngr.add_workload("dstack", (pool['2x1'], pool['2x1'], pool['2x1'], pool['2x1']))
OpArgMngr.add_workload("polyval", dnp.arange(10), pool['2x2'])
OpArgMngr.add_workload("ediff1d", pool['2x2'], pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("nan_to_num", pool['2x2'])
OpArgMngr.add_workload("tri", 2, 3, 4)
OpArgMngr.add_workload("tensordot", pool['2x2'], pool['2x2'], ((1, 0), (0, 1)))
OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2'])
OpArgMngr.add_workload("random.shuffle", pool['3'])
OpArgMngr.add_workload("equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("not_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("greater_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("maximum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("minimum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("sum", pool['2x2'], axis=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("std", pool['2x2'], axis=0, ddof=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("var", pool['2x2'], axis=0, ddof=1, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("average", pool['2x2'], weights=pool['2'], axis=1, returned=True)
OpArgMngr.add_workload("histogram", pool['2x2'], bins=10, range=(0.0, 10.0))
OpArgMngr.add_workload("add", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("cross", pool['2'], pool['2'])
OpArgMngr.add_workload("linalg.eig", pool['3x3'])
OpArgMngr.add_workload("linalg.eigh", pool['3x3'])
OpArgMngr.add_workload("linalg.det", pool['3x3'])
OpArgMngr.add_workload("linalg.slogdet", pool['3x3'])
OpArgMngr.add_workload("linalg.matrix_rank", pool['3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.svd", pool['3x3'])
OpArgMngr.add_workload("linalg.cholesky", pool['1x1'])
OpArgMngr.add_workload("linalg.qr", pool['3x3'])
OpArgMngr.add_workload("linalg.lstsq", pool['2x1'], pool['2'], rcond=None)
OpArgMngr.add_workload("linalg.eigvals", pool['1x1'])
OpArgMngr.add_workload("linalg.eigvalsh", pool['1x1'], UPLO='L')
OpArgMngr.add_workload("linalg.inv", pool['1x1'])
OpArgMngr.add_workload("linalg.pinv", pool['2x3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.solve", pool['1x1'], pool['1'])
OpArgMngr.add_workload("linalg.tensorinv", pool['1x1'], ind=2)
OpArgMngr.add_workload("linalg.norm", pool['3x3'])
OpArgMngr.add_workload("linalg.tensorsolve", pool['1x1x1'], pool['1x1x1'], (2, 0, 1))
OpArgMngr.add_workload("tile", pool['2x2'], 1)
OpArgMngr.add_workload("trace", pool['2x2'])
OpArgMngr.add_workload("transpose", pool['2x2'])
OpArgMngr.add_workload("split", pool['3x3'], (0, 1, 2), axis=1)
OpArgMngr.add_workload("vstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("argmax", pool['3x2'], axis=-1)
OpArgMngr.add_workload("argmin", pool['3x2'], axis=-1)
OpArgMngr.add_workload("atleast_1d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_2d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_3d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("argsort", pool['3x2'], axis=-1)
OpArgMngr.add_workload("sort", pool['3x2'], axis=-1)
OpArgMngr.add_workload("indices", dimensions=(1, 2, 3))
OpArgMngr.add_workload("subtract", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("multiply", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mod", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("remainder", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("true_divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("power", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("lcm", pool['2x2'].astype('int32'), pool['2x2'].astype('int32'))
OpArgMngr.add_workload("diff", pool['2x2'], n=1, axis=-1)
OpArgMngr.add_workload("inner", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.multinomial", n=2, pvals=[1/6.]*6, size=(2,2))
OpArgMngr.add_workload("random.rand", 3, 2)
OpArgMngr.add_workload("random.randn", 2, 2)
OpArgMngr.add_workload("nonzero", pool['2x2'])
OpArgMngr.add_workload("tril", pool['2x2'], k=0)
OpArgMngr.add_workload("random.choice", pool['2'], size=(2, 2))
OpArgMngr.add_workload("take", pool['2'], dnp.array([1,0], dtype='int64'))
OpArgMngr.add_workload("clip", pool['2x2'], 0, 1)
OpArgMngr.add_workload("expand_dims", pool['2x2'], axis=0)
OpArgMngr.add_workload("broadcast_to", pool['2x2'], (2, 2, 2))
OpArgMngr.add_workload("full_like", pool['2x2'], 2)
OpArgMngr.add_workload("zeros_like", pool['2x2'])
OpArgMngr.add_workload("ones_like", pool['2x2'])
OpArgMngr.add_workload("bitwise_and", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_xor", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_or", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("copysign", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("arctan2", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("hypot", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("ldexp", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("logical_and", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_or", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_xor", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.uniform", low=0, high=1, size=1)
OpArgMngr.add_workload("random.exponential", scale=2, size=(2,2))
OpArgMngr.add_workload("random.rayleigh", scale=2, size=(2,2))
OpArgMngr.add_workload("random.weibull", a=2, size=(2,2))
OpArgMngr.add_workload("random.pareto", a=2, size=(2,2))
OpArgMngr.add_workload("random.power", a=2, size=(2,2))
OpArgMngr.add_workload("random.logistic", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("random.gumbel", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("where", pool['2x3'], pool['2x3'], pool['2x1'])
OpArgMngr.add_workload("may_share_memory", pool['2x3'][:0], pool['2x3'][:1])
OpArgMngr.add_workload('squeeze', pool['2x2'], axis=None)
OpArgMngr.add_workload("pad", pool['2x2'], pad_width=((1,2),(1,2)), mode="constant")
OpArgMngr.add_workload("prod", pool['2x2'], axis=1, dtype="float64", keepdims=False)
OpArgMngr.add_workload("around", pool['2x2'], decimals=0)
OpArgMngr.add_workload("round", pool['2x2'], decimals=1)
OpArgMngr.add_workload("repeat", pool['2x2'], repeats=1, axis=None)
OpArgMngr.add_workload("diagflat", pool['2x2'], k=1)
OpArgMngr.add_workload("diag", pool['2x2'], k=1)
OpArgMngr.add_workload("diagonal", pool['2x2x2'], offset=-1, axis1=0, axis2=1)
OpArgMngr.add_workload("diag_indices_from", pool['2x2'])
OpArgMngr.add_workload("bincount", dnp.arange(3, dtype=int), pool['3'], minlength=4)
OpArgMngr.add_workload("percentile", pool['2x2x2'], 80, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("quantile", pool['2x2x2'], 0.8, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("all", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("any", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("roll", pool["2x2"], 1, axis=0)
OpArgMngr.add_workload("rot90", pool["2x2"], 2)
OpArgMngr.add_workload("column_stack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("hstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("triu", pool['3x3'])
OpArgMngr.add_workload("array_split", pool['2x2'], 2, axis=1)
OpArgMngr.add_workload("vsplit", pool['2x2'], 2)
OpArgMngr.add_workload("hsplit", pool['2x2'], 2)
OpArgMngr.add_workload("dsplit", pool['2x2x2'], 2)
OpArgMngr.add_workload("arange", 10)
OpArgMngr.add_workload("concatenate", (pool['1x2'], pool['1x2'], pool['1x2']), axis=0)
OpArgMngr.add_workload("append", pool['2x2'], pool['1x2'], axis=0)
OpArgMngr.add_workload("insert", pool['3x2'], 1, pool['1x1'], axis=0)
OpArgMngr.add_workload("delete", pool['3x2'], 1, axis=0)
OpArgMngr.add_workload("blackman", 12)
OpArgMngr.add_workload("eye", 5)
OpArgMngr.add_workload("hamming", 12)
OpArgMngr.add_workload("hanning", 12)
OpArgMngr.add_workload("linspace", 0, 10, 8, endpoint=False)
OpArgMngr.add_workload("logspace", 2.0, 3.0, num=4, base=2.0, dtype=onp.float32)
OpArgMngr.add_workload("matmul", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mean", pool['2x2'], axis=0, keepdims=True)
OpArgMngr.add_workload("random.gamma", 1, size=(2, 3))
OpArgMngr.add_workload("random.normal", 1, size=(2, 3))
OpArgMngr.add_workload("max", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("min", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("amax", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("amin", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
unary_ops = ['negative', 'reciprocal', 'abs', 'sign', 'rint', 'ceil', 'floor',
'bitwise_not', 'trunc', 'fix', 'square', 'sqrt', 'cbrt', 'exp',
'log', 'log10', 'log2', 'log1p', 'expm1', 'logical_not', 'isnan',
'isinf', 'isposinf', 'isneginf', 'isfinite', 'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan', 'degrees', 'radians', 'sinh', 'cosh',
'tanh', 'arcsinh', 'arccosh', 'arctanh'] # 'rad2deg', 'deg2rad' cannot run without tvm
for unary_op in unary_ops:
if unary_op == "bitwise_not":
OpArgMngr.add_workload(unary_op, dnp.ones((2, 2), dtype=int))
else:
OpArgMngr.add_workload(unary_op, pool['2x2'])
def benchmark_helper(f, *args, **kwargs):
number = 10000
return timeit.timeit(lambda: f(*args, **kwargs), number=number) / number
def get_op(module, funcname):
funcname = funcname.split(".")
for fname in funcname:
module = getattr(module, fname)
return module
def run_benchmark(packages):
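    # Time every registered workload against each package (NumPy vs mxnet.numpy),
    # converting the stored arguments with the package's `data` adapter first.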
results = {}
for (k, v) in OpArgMngr.args.items():
result = {}
for (name, package) in packages.items():
print('{}.{} running...'.format(name, k))
op = get_op(package["module"], v["funcname"])
args = [package["data"](arg) for arg in v["args"]]
kwargs = {k: package["data"](v) for (k, v) in v["kwargs"].items()}
benchmark = benchmark_helper(op, *args, **kwargs)
result[name] = benchmark
results[k] = result
return results
def show_results(results):
print("{:>24}{:>24}{:>24}".format("name", "package", "time(us)"))
for (specifier, d) in results.items():
for (k, v) in d.items():
print("{:>24}{:>24}{:>24}".format(specifier, k, v * 10 ** 6))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ffi_type')
parsed = parser.parse_args()
if parsed.ffi_type == "cython":
os.environ['MXNET_ENABLE_CYTHON'] = '1'
os.environ['MXNET_ENFORCE_CYTHON'] = '1'
elif parsed.ffi_type == "ctypes":
os.environ['MXNET_ENABLE_CYTHON'] = '0'
else:
raise ValueError("unknown ffi_type {}",format(parsed.ffi_type))
os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
import mxnet as mx
import numpy as onp
from mxnet import np as dnp
mx.npx.set_np(dtype=False)
packages = {
"onp": {
"module": onp,
"data": lambda arr: arr.asnumpy() if isinstance(arr, dnp.ndarray) else arr
},
"dnp": {
"module": dnp,
"data": lambda arr: arr
}
}
prepare_workloads()
results = run_benchmark(packages)
show_results(results)
| [((250, 13, 250, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((265, 4, 265, 30), 'mxnet.npx.set_np', 'mx.npx.set_np', (), '', True, 'import mxnet as mx\n'), ((47, 27, 47, 42), 'mxnet.np.ones', 'dnp.ones', ({(47, 36, 47, 41): 'shape'}, {}), '(shape)', True, 'from mxnet import np as dnp\n'), ((60, 38, 60, 52), 'mxnet.np.arange', 'dnp.arange', ({(60, 49, 60, 51): '(10)'}, {}), '(10)', True, 'from mxnet import np as dnp\n'), ((127, 46, 127, 77), 'mxnet.np.array', 'dnp.array', (), '', True, 'from mxnet import np as dnp\n'), ((164, 39, 164, 63), 'mxnet.np.arange', 'dnp.arange', (), '', True, 'from mxnet import np as dnp\n'), ((170, 31, 170, 68), 'mxnet.np.array', 'dnp.array', (), '', True, 'from mxnet import np as dnp\n'), ((172, 31, 172, 68), 'mxnet.np.array', 'dnp.array', (), '', True, 'from mxnet import np as dnp\n'), ((210, 45, 210, 72), 'mxnet.np.ones', 'dnp.ones', (), '', True, 'from mxnet import np as dnp\n')] |
levabd/smart-climat-daemon | first-floor.py | 8ff273eeb74fb03ea04fda11b0128fa13d35b500 | #!/usr/bin/env python3
import json
import argparse
import re
import datetime
import paramiko
import requests
# cmd ['ssh', 'smart',
# 'mkdir -p /home/levabd/smart-home-temp-humidity-monitor;
# cat - > /home/levabd/smart-home-temp-humidity-monitor/lr.json']
from miio import chuangmi_plug
from btlewrap import available_backends, BluepyBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
state = {}
f = open('/home/pi/smart-climat-daemon/ac_state.json')
state = json.load(f)
plug_type = 'chuangmi.plug.m1'
def valid_mitemp_mac(mac, pat=re.compile(r"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
"""Check for valid mac addresses."""
if not pat.match(mac.upper()):
raise argparse.ArgumentTypeError(
'The MAC address "{}" seems to be in the wrong format'.format(mac))
return mac
def turn_on_humidifier():
"""Turn on humidifier on a first floor."""
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=1,
lazy_discover=True,
model=plug_type)
hummidifier_plug.on()
def turn_off_humidifier():
"""Turn off humidifier on a first floor."""
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=1,
lazy_discover=True,
model=plug_type)
hummidifier_plug.off()
def check_if_ac_off():
"""Check if AC is turned off."""
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
    if ('address' not in response.json()) or ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if response.json()['props']['boot'] == 0:
return True
return False
return None
def check_if_ac_cool():
"""Check if AC is turned for a automate cooling."""
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) or ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
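        # Automated-cooling preset: unit powered on, runMode '001', wdNumber 25
        # and windLevel '001' must all match.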
if not response.json()['props']['boot'] == 1:
return False
if not response.json()['props']['runMode'] == '001':
return False
if not response.json()['props']['wdNumber'] == 25:
return False
if not response.json()['props']['windLevel'] == '001':
return False
return True
return None
def check_if_ac_heat():
"""Check if AC is turned for a automate heating."""
status_url = 'http://smart.levabd.pp.ua:2003/status/key/27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
    if ('address' not in response.json()) or ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if not response.json()['props']['boot'] == 1:
return False
if not response.json()['props']['runMode'] == '100':
return False
if not response.json()['props']['wdNumber'] == 23:
return False
if not response.json()['props']['windLevel'] == '001':
return False
return True
return None
def turn_on_heat_ac():
"""Turn on AC on a first floor for a heating if it was not."""
if (state['wasTurnedHeat'] == 1) and not state['triedTurnedHeat'] == 1:
return
heat_url = 'http://smart.levabd.pp.ua:2003/heat/key/27fbc501b51b47663e77c46816a'
ac_heat = check_if_ac_heat()
if ac_heat is not None:
if not ac_heat:
state['triedTurnedHeat'] = 1
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(heat_url)
print(response.json())
else:
if state['triedTurnedHeat'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 1
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def turn_on_cool_ac():
"""Turn on AC on a first floor for a cooling if it was not."""
if (state['wasTurnedCool'] == 1) and not state['triedTurnedCool'] == 1:
return
cool_url = 'http://smart.levabd.pp.ua:2003/cool/key/27fbc501b51b47663e77c46816a'
ac_cool = check_if_ac_cool()
if ac_cool is not None:
if not ac_cool:
state['triedTurnedCool'] = 1
state['wasTurnedCool'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(cool_url)
print(response.json())
else:
if state['triedTurnedCool'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 1
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def turn_off_ac():
"""Turn off AC on a first floor."""
if (state['wasTurnedOff'] == 1) and not state['triedTurnedOff'] == 1:
return
turn_url = 'http://smart.levabd.pp.ua:2003/power-off/key/27fbc501b51b47663e77c46816a'
ac_off = check_if_ac_off()
if ac_off is not None:
if not ac_off:
state['triedTurnedOff'] = 1
state['wasTurnedOff'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(turn_url)
print(response.json())
else:
if state['triedTurnedOff'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 1
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def record_temp_humid(temperature, humidity):
"""Record temperature and humidity data for web interface monitor"""
dicty = {
"temperature": temperature,
"humidity": humidity
}
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('smart.levabd.pp.ua', port = 2001, username='levabd', password='vapipu280.')
sftp = ssh.open_sftp()
with sftp.open('smart-home-temp-humidity-monitor/lr.json', 'w') as outfile:
json.dump(dicty, outfile)
ssh.close()
def poll_temp_humidity():
"""Poll data frstate['triedTurnedOff']om the sensor."""
today = datetime.datetime.today()
backend = BluepyBackend
poller = MiTempBtPoller('58:2d:34:38:c0:91', backend)
temperature = poller.parameter_value(MI_TEMPERATURE)
humidity = poller.parameter_value(MI_HUMIDITY)
print("Month: {}".format(today.month))
print("Getting data from Mi Temperature and Humidity Sensor")
print("FW: {}".format(poller.firmware_version()))
print("Name: {}".format(poller.name()))
print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
return (today, temperature, humidity)
# scan(args):
# """Scan for sensors."""
# backend = _get_backend(args)
# print('Scanning for 10 seconds...')
# devices = mitemp_scanner.scan(backend, 10)
# devices = []
# print('Found {} devices:'.format(len(devices)))
# for device in devices:
# print(' {}'.format(device))
def list_backends(_):
"""List all available backends."""
backends = [b.__name__ for b in available_backends()]
print('\n'.join(backends))
def main():
"""Main function."""
# check_if_ac_cool()
(today, temperature, humidity) = poll_temp_humidity()
# Record temperature and humidity for monitor
record_temp_humid(temperature, humidity)
try:
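        # Keep indoor humidity roughly between 31% and 49%; the May-September and
        # October-April branches apply the same thresholds.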
if (humidity > 49) and (today.month < 10) and (today.month > 4):
turn_off_humidifier()
if (humidity < 31) and (today.month < 10) and (today.month > 4):
turn_on_humidifier()
if (humidity < 31) and ((today.month > 9) or (today.month < 5)):
turn_on_humidifier()
if (humidity > 49) and ((today.month > 9) or (today.month < 5)):
turn_off_humidifier()
# Prevent Sleep of Xiaomi Smart Plug
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=0,
lazy_discover=True,
model='chuangmi.plug.m1')
print(hummidifier_plug.status())
except Exception:
print("Can not connect to humidifier")
# clear env at night
if today.hour == 4:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
if (today.hour > -1) and (today.hour < 7):
turn_off_ac()
if (temperature > 26.4) and (today.month < 6) and (today.month > 4) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature > 26.4) and (today.month < 10) and (today.month > 8) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature > 27.3) and (today.month < 9) and (today.month > 5) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature < 23.5) and (today.month < 10) and (today.month > 4):
turn_off_ac()
# _if (temperature < 20) and ((today.month > 9) or (today.month < 5)) and (today.hour < 24) and (today.hour > 9):
# turn_on_heat_ac()
if (temperature > 22) and ((today.month > 9) or (today.month < 5)):
turn_off_ac()
if __name__ == '__main__':
main()
| [((21, 8, 21, 20), 'json.load', 'json.load', ({(21, 18, 21, 19): 'f'}, {}), '(f)', False, 'import json\n'), ((24, 30, 24, 116), 're.compile', 're.compile', ({(24, 41, 24, 115): '"""[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}"""'}, {}), "(\n '[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}')", False, 'import re\n'), ((33, 23, 39, 24), 'miio.chuangmi_plug.ChuangmiPlug', 'chuangmi_plug.ChuangmiPlug', (), '', False, 'from miio import chuangmi_plug\n'), ((45, 23, 51, 24), 'miio.chuangmi_plug.ChuangmiPlug', 'chuangmi_plug.ChuangmiPlug', (), '', False, 'from miio import chuangmi_plug\n'), ((58, 15, 58, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((70, 15, 70, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((89, 15, 89, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((189, 10, 189, 30), 'paramiko.SSHClient', 'paramiko.SSHClient', ({}, {}), '()', False, 'import paramiko\n'), ((202, 12, 202, 37), 'datetime.datetime.today', 'datetime.datetime.today', ({}, {}), '()', False, 'import datetime\n'), ((204, 13, 204, 57), 'mitemp_bt.mitemp_bt_poller.MiTempBtPoller', 'MiTempBtPoller', ({(204, 28, 204, 47): '"""58:2d:34:38:c0:91"""', (204, 49, 204, 56): 'backend'}, {}), "('58:2d:34:38:c0:91', backend)", False, 'from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY\n'), ((190, 36, 190, 60), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ({}, {}), '()', False, 'import paramiko\n'), ((195, 8, 195, 33), 'json.dump', 'json.dump', ({(195, 18, 195, 23): 'dicty', (195, 25, 195, 32): 'outfile'}, {}), '(dicty, outfile)', False, 'import json\n'), ((252, 27, 258, 37), 'miio.chuangmi_plug.ChuangmiPlug', 'chuangmi_plug.ChuangmiPlug', (), '', False, 'from miio import chuangmi_plug\n'), ((117, 23, 117, 45), 'requests.get', 'requests.get', ({(117, 36, 117, 44): 'heat_url'}, {}), '(heat_url)', False, 'import requests\n'), ((143, 23, 143, 45), 'requests.get', 'requests.get', ({(143, 36, 143, 44): 'cool_url'}, {}), '(cool_url)', False, 'import requests\n'), ((169, 23, 169, 45), 'requests.get', 'requests.get', ({(169, 36, 169, 44): 'turn_url'}, {}), '(turn_url)', False, 'import requests\n'), ((229, 36, 229, 56), 'btlewrap.available_backends', 'available_backends', ({}, {}), '()', False, 'from btlewrap import available_backends, BluepyBackend\n'), ((272, 12, 272, 34), 'json.dump', 'json.dump', ({(272, 22, 272, 27): 'state', (272, 29, 272, 33): 'file'}, {}), '(state, file)', False, 'import json\n'), ((116, 16, 116, 38), 'json.dump', 'json.dump', ({(116, 26, 116, 31): 'state', (116, 33, 116, 37): 'file'}, {}), '(state, file)', False, 'import json\n'), ((142, 16, 142, 38), 'json.dump', 'json.dump', ({(142, 26, 142, 31): 'state', (142, 33, 142, 37): 'file'}, {}), '(state, file)', False, 'import json\n'), ((168, 16, 168, 38), 'json.dump', 'json.dump', ({(168, 26, 168, 31): 'state', (168, 33, 168, 37): 'file'}, {}), '(state, file)', False, 'import json\n'), ((128, 20, 128, 42), 'json.dump', 'json.dump', ({(128, 30, 128, 35): 'state', (128, 37, 128, 41): 'file'}, {}), '(state, file)', False, 'import json\n'), ((154, 20, 154, 42), 'json.dump', 'json.dump', ({(154, 30, 154, 35): 'state', (154, 37, 154, 41): 'file'}, {}), '(state, file)', False, 'import json\n'), ((180, 20, 180, 42), 'json.dump', 'json.dump', ({(180, 30, 180, 35): 'state', (180, 37, 180, 41): 'file'}, {}), '(state, file)', False, 'import json\n')] |
Optimist-Prime/QML-for-MNIST-classification | reservior_classification.py | 7513b3faa548166dba3df927a248e8c7f1ab2a15 | import pickle
from sklearn.neural_network import MLPClassifier
train = pickle.load(open('train_pca_reservoir_output_200samples.pickle','rb'))
test = pickle.load(open('test_pca_reservoir_output_50samples.pickle','rb'))
train_num = 200
test_num = 50
mlp = MLPClassifier(hidden_layer_sizes=(2000,), max_iter=100, alpha=1e-5,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1, batch_size= 20)
mlp.fit(train[0], train[1][:train_num])
print("Training set score: %f" % mlp.score(train[0], train[1][:train_num]))
print("Test set score: %f" % mlp.score(test[0], test[1][:test_num]))
| [((10, 6, 12, 58), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', (), '', False, 'from sklearn.neural_network import MLPClassifier\n')] |
delmarrerikaine/LPG-PCA | util.py | deb631ee2c4c88190ce4204fcbc0765ae5cd8f53 | import numpy as np
import pandas as pd
from skimage import io
import skimage.measure as measure
import os
from lpg_pca_impl import denoise
def getNoisedImage(originalImage, variance):
# return random_noise(originalImage, mode='gaussian', var=variance)
np.random.seed(42)
noise = np.random.normal(size=originalImage.shape)
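    # Normalize the noise to unit RMS, then scale by `variance`, so the argument
    # effectively sets the noise standard deviation rather than the variance.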
noise = noise/np.sqrt(np.power(noise, 2).mean())
noisedImage = originalImage + variance*noise
return noisedImage
def clip(img):
img = np.minimum(np.ones(img.shape), img)
img = np.maximum(np.zeros(img.shape), img)
return img
def readImg(path):
return io.imread(path, as_gray=True).astype('float64')/255.0
def showImg(img, name):
print(name)
img = clip(img)
io.imshow((img*255.0).astype('uint8'))
def saveImg(img, path):
img = clip(img)
io.imsave(path, (img*255.0).astype('uint8'))
def compare_psnr(img1, img2):
return measure.compare_psnr(img1, img2)
def compare_ssim(img1, img2):
return measure.compare_ssim(img1, img2)
def generate_images(img_name='mri'):
experiments_folder = 'experiments'
noise_variances = [10, 20, 30, 40]
for noise_variance in noise_variances:
corrected_noise_variance = noise_variance / 255.0
original_img = readImg(os.path.join('images', img_name + '.png'))
noised_img = getNoisedImage(original_img, corrected_noise_variance)
noised_file_name = img_name + '_noised_' + str(noise_variance) + '.png'
saveImg(noised_img, os.path.join(experiments_folder, noised_file_name))
print(noised_file_name + ' started.')
denoised_img = denoise(noised_img, noise_variance)
denoised_file_name = img_name + '_denoised_' + str(noise_variance) + '.png'
saveImg(denoised_img, os.path.join(experiments_folder, denoised_file_name))
print(denoised_file_name + ' finished.')
print("noised PSNR: " + str(compare_psnr(original_img, noised_img)) + ", SSIM: " + str(compare_ssim(original_img, noised_img)))
print("denoised PSNR: " + str(compare_psnr(original_img, denoised_img)) + ", SSIM: " + str(compare_ssim(original_img, denoised_img)))
def generate_latex_tables():
df = pd.read_csv('data.csv')
df = df.round(2)
image_texts = np.array([])
temp_directory = os.path.join(os.path.dirname(__file__), 'temp')
if not os.path.exists(temp_directory):
os.makedirs(temp_directory)
for image_name in list(set(df['image_name'])):
image_df = df[df['image_name'] == image_name]
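        # Merge PSNR and SSIM into single "PSNR(SSIM)" cells for each denoising
        # method before exporting the per-image LaTeX table.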
image_df['denoise_lpg_pca'] = image_df['denoise_psnr_lpg_pca'].map(str) + '(' + image_df['denoise_ssim_lpg_pca'].map(str) + ')'
image_df['denoise_mf'] = image_df['denoise_psnr_mf'].map(str) + '(' + image_df['denoise_ssim_mf'].map(str) + ')'
image_df['denoise_nlm'] = image_df['denoise_psnr_nlm'].map(str) + '(' + image_df['denoise_ssim_nlm'].map(str) + ')'
image_df['denoise_bm3d'] = image_df['denoise_psnr_bm3d'].map(str) + '(' + image_df['denoise_ssim_bm3d'].map(str) + ')'
image_df = image_df[['sigma', 'denoise_lpg_pca', 'denoise_mf', 'denoise_nlm', 'denoise_bm3d']]
image_df['sigma'] = image_df['sigma'].map(int)
image_df.columns = ['sigma', 'LPG-PCA', 'MF', "NLM", 'BM3D']
path = os.path.join(temp_directory, image_name + '.tex')
image_df.to_latex(path, index=False, column_format='lrrrr')
with open(path, 'r') as file:
image_text = file.read()
image_text = image_text.replace(' ', '').replace(r'\toprule', r'\toprule &&' + image_name + r'\\ \midrule')
image_text = r'\noindent\begin{minipage}{.5\linewidth}' + '\n' + image_text + '\n' + r'\end{minipage}'
image_text = image_text.replace('\n\n', '\n').replace('sigma&', '$\\sigma$&')
image_texts = np.append(image_texts, image_text)
os.remove(path)
result = '\n'.join(image_texts)
filename = 'tables.tex'
with open(filename, "w+") as file:
file.write(result)
if(len(os.listdir(temp_directory))) == 0:
os.rmdir(temp_directory)
| [((11, 4, 11, 22), 'numpy.random.seed', 'np.random.seed', ({(11, 19, 11, 21): '(42)'}, {}), '(42)', True, 'import numpy as np\n'), ((12, 12, 12, 54), 'numpy.random.normal', 'np.random.normal', (), '', True, 'import numpy as np\n'), ((40, 11, 40, 43), 'skimage.measure.compare_psnr', 'measure.compare_psnr', ({(40, 32, 40, 36): 'img1', (40, 38, 40, 42): 'img2'}, {}), '(img1, img2)', True, 'import skimage.measure as measure\n'), ((44, 11, 44, 43), 'skimage.measure.compare_ssim', 'measure.compare_ssim', ({(44, 32, 44, 36): 'img1', (44, 38, 44, 42): 'img2'}, {}), '(img1, img2)', True, 'import skimage.measure as measure\n'), ((73, 9, 73, 32), 'pandas.read_csv', 'pd.read_csv', ({(73, 21, 73, 31): '"""data.csv"""'}, {}), "('data.csv')", True, 'import pandas as pd\n'), ((76, 18, 76, 30), 'numpy.array', 'np.array', ({(76, 27, 76, 29): '[]'}, {}), '([])', True, 'import numpy as np\n'), ((19, 21, 19, 39), 'numpy.ones', 'np.ones', ({(19, 29, 19, 38): 'img.shape'}, {}), '(img.shape)', True, 'import numpy as np\n'), ((20, 21, 20, 40), 'numpy.zeros', 'np.zeros', ({(20, 30, 20, 39): 'img.shape'}, {}), '(img.shape)', True, 'import numpy as np\n'), ((62, 23, 62, 58), 'lpg_pca_impl.denoise', 'denoise', ({(62, 31, 62, 41): 'noised_img', (62, 43, 62, 57): 'noise_variance'}, {}), '(noised_img, noise_variance)', False, 'from lpg_pca_impl import denoise\n'), ((78, 34, 78, 59), 'os.path.dirname', 'os.path.dirname', ({(78, 50, 78, 58): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((79, 11, 79, 41), 'os.path.exists', 'os.path.exists', ({(79, 26, 79, 40): 'temp_directory'}, {}), '(temp_directory)', False, 'import os\n'), ((80, 8, 80, 35), 'os.makedirs', 'os.makedirs', ({(80, 20, 80, 34): 'temp_directory'}, {}), '(temp_directory)', False, 'import os\n'), ((92, 15, 92, 64), 'os.path.join', 'os.path.join', ({(92, 28, 92, 42): 'temp_directory', (92, 44, 92, 63): "image_name + '.tex'"}, {}), "(temp_directory, image_name + '.tex')", False, 'import os\n'), ((102, 8, 102, 23), 'os.remove', 'os.remove', ({(102, 18, 102, 22): 'path'}, {}), '(path)', False, 'import os\n'), ((111, 8, 111, 32), 'os.rmdir', 'os.rmdir', ({(111, 17, 111, 31): 'temp_directory'}, {}), '(temp_directory)', False, 'import os\n'), ((54, 31, 54, 72), 'os.path.join', 'os.path.join', ({(54, 44, 54, 52): '"""images"""', (54, 54, 54, 71): "img_name + '.png'"}, {}), "('images', img_name + '.png')", False, 'import os\n'), ((59, 28, 59, 78), 'os.path.join', 'os.path.join', ({(59, 41, 59, 59): 'experiments_folder', (59, 61, 59, 77): 'noised_file_name'}, {}), '(experiments_folder, noised_file_name)', False, 'import os\n'), ((65, 30, 65, 82), 'os.path.join', 'os.path.join', ({(65, 43, 65, 61): 'experiments_folder', (65, 63, 65, 81): 'denoised_file_name'}, {}), '(experiments_folder, denoised_file_name)', False, 'import os\n'), ((100, 26, 100, 60), 'numpy.append', 'np.append', ({(100, 36, 100, 47): 'image_texts', (100, 49, 100, 59): 'image_text'}, {}), '(image_texts, image_text)', True, 'import numpy as np\n'), ((110, 11, 110, 37), 'os.listdir', 'os.listdir', ({(110, 22, 110, 36): 'temp_directory'}, {}), '(temp_directory)', False, 'import os\n'), ((25, 11, 25, 40), 'skimage.io.imread', 'io.imread', (), '', False, 'from skimage import io\n'), ((13, 26, 13, 44), 'numpy.power', 'np.power', ({(13, 35, 13, 40): 'noise', (13, 42, 13, 43): '(2)'}, {}), '(noise, 2)', True, 'import numpy as np\n')] |
kringen/wingnut | ui/ui.py | 73be4f8393720ff0932ab069543e5f2d2308296d | import redis
from rq import Queue, Connection
from flask import Flask, render_template, Blueprint, jsonify, request
import tasks
import rq_dashboard
from wingnut import Wingnut
app = Flask(
__name__,
template_folder="./templates",
static_folder="./static",
)
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
@app.route("/", methods=["GET"])
def home():
return render_template("main/home.html")
@app.route("/tasks", methods=["POST"])
def run_task():
task_type = request.form["type"]
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue()
task = q.enqueue(tasks.create_task, task_type)
response_object = {
"status": "success",
"data": {
"task_id": task.get_id()
}
}
return jsonify(response_object), 202
@app.route("/mode", methods=["POST"])
def set_mode():
task_type = request.form["type"]
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue("mode")
task = q.enqueue(tasks.set_mode, task_type)
response_object = {
"status": "success",
"data": {
"task_id": task.get_id()
}
}
return jsonify(response_object), 202
@app.route("/tasks/<task_id>", methods=["GET"])
def get_status(task_id):
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue()
task = q.fetch_job(task_id)
if task:
response_object = {
"status": "success",
"data": {
"task_id": task.get_id(),
"task_status": task.get_status(),
"task_result": task.result,
},
}
else:
response_object = {"status": "error"}
return jsonify(response_object)
@app.route("/configuration", methods=["GET"])
def get_configuration():
wingnut = Wingnut()
response_object = {
"status": "success",
"data": {
"servoPin": wingnut.servoPin,
"leftMotorPin1": wingnut.leftMotorPin1,
"leftMotorPin1": wingnut.leftMotorPin2,
"leftMotorEnablePin": wingnut.leftMotorEnablePin,
"rightMotorPin1": wingnut.rightMotorPin1,
"rightMotorPin1": wingnut.rightMotorPin2,
"rightMotorEnablePin": wingnut.rightMotorEnablePin,
"sonarTriggerPin": wingnut.sonarTriggerPin,
"sonarEchoPin": wingnut.sonarEchoPin
}
}
return jsonify(response_object)
@app.route("/diagnostics", methods=["GET"])
def get_diagnostics():
r = redis.Redis()
diagnostics = {}
diagnostics["power_level"] = r.get("power_level").decode("utf-8")
diagnostics["temperature"] = r.get("temperature").decode("utf-8")
diagnostics["free_memory_mb"] = r.get("free_memory_mb").decode("utf-8")
diagnostics["free_disk_space"] = r.get("free_disk_space").decode("utf-8")
response_object = {
"status": "success",
"data": {
"diagnostics": diagnostics
}
}
return jsonify(response_object)
if __name__ == "__main__":
app.run(host="0.0.0.0",debug=1)
| [((8, 6, 12, 5), 'flask.Flask', 'Flask', (), '', False, 'from flask import Flask, render_template, Blueprint, jsonify, request\n'), ((19, 11, 19, 44), 'flask.render_template', 'render_template', ({(19, 27, 19, 43): '"""main/home.html"""'}, {}), "('main/home.html')", False, 'from flask import Flask, render_template, Blueprint, jsonify, request\n'), ((65, 11, 65, 35), 'flask.jsonify', 'jsonify', ({(65, 19, 65, 34): 'response_object'}, {}), '(response_object)', False, 'from flask import Flask, render_template, Blueprint, jsonify, request\n'), ((69, 14, 69, 23), 'wingnut.Wingnut', 'Wingnut', ({}, {}), '()', False, 'from wingnut import Wingnut\n'), ((84, 11, 84, 35), 'flask.jsonify', 'jsonify', ({(84, 19, 84, 34): 'response_object'}, {}), '(response_object)', False, 'from flask import Flask, render_template, Blueprint, jsonify, request\n'), ((88, 8, 88, 21), 'redis.Redis', 'redis.Redis', ({}, {}), '()', False, 'import redis\n'), ((100, 11, 100, 35), 'flask.jsonify', 'jsonify', ({(100, 19, 100, 34): 'response_object'}, {}), '(response_object)', False, 'from flask import Flask, render_template, Blueprint, jsonify, request\n'), ((25, 12, 25, 19), 'rq.Queue', 'Queue', ({}, {}), '()', False, 'from rq import Queue, Connection\n'), ((33, 11, 33, 35), 'flask.jsonify', 'jsonify', ({(33, 19, 33, 34): 'response_object'}, {}), '(response_object)', False, 'from flask import Flask, render_template, Blueprint, jsonify, request\n'), ((39, 12, 39, 25), 'rq.Queue', 'Queue', ({(39, 18, 39, 24): '"""mode"""'}, {}), "('mode')", False, 'from rq import Queue, Connection\n'), ((47, 11, 47, 35), 'flask.jsonify', 'jsonify', ({(47, 19, 47, 34): 'response_object'}, {}), '(response_object)', False, 'from flask import Flask, render_template, Blueprint, jsonify, request\n'), ((52, 12, 52, 19), 'rq.Queue', 'Queue', ({}, {}), '()', False, 'from rq import Queue, Connection\n'), ((24, 20, 24, 60), 'redis.from_url', 'redis.from_url', ({(24, 35, 24, 59): '"""redis://localhost:6379"""'}, {}), "('redis://localhost:6379')", False, 'import redis\n'), ((38, 20, 38, 60), 'redis.from_url', 'redis.from_url', ({(38, 35, 38, 59): '"""redis://localhost:6379"""'}, {}), "('redis://localhost:6379')", False, 'import redis\n'), ((51, 20, 51, 60), 'redis.from_url', 'redis.from_url', ({(51, 35, 51, 59): '"""redis://localhost:6379"""'}, {}), "('redis://localhost:6379')", False, 'import redis\n')] |
Openmail/pytaboola | pytaboola/__init__.py | ed71b3b9c5fb2e4452d4b6d40aec1ff037dd5436 | from pytaboola.client import TaboolaClient | [] |
omi28/ga-learner-dst-repo | omkar/code.py | 396c35ea56028717a96aed6ca771e39ebf68dc5b | # --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#New record
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
data.shape
cenus=np.concatenate((new_record,data),axis=0)
cenus.shape
print(cenus)
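# Column indices used below: 0 = age, 1 = education-num, 2 = race,
# 6 = hours-per-week, 7 = income.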
age=cenus[:,0]
max_age=age.max()
print(max_age)
min_age=age.min()
mean_age=np.mean(age)
age_std=np.std(age)
race=cenus[:,2]
print(race)
race_0=(race==0)
len_0=len(race[race_0])
print(len_0)
race_1=(race==1)
len_1=len(race[race_1])
race_2=(race==2)
race_3=(race==3)
race_4=(race==4)
len_2=len(race[race_2])
len_3=len(race[race_3])
len_4=len(race[race_4])
minority_race=3
print(minority_race)
senior_citizen=(age>60)
working_hour_sum=sum(cenus[:,6][senior_citizen])
print(working_hour_sum)
senior_citizen_len=len(age[senior_citizen])
avg_working_hours=working_hour_sum/senior_citizen_len
avg_working_hours=round(avg_working_hours,2)
education_num=cenus[:,1]
print(education_num)
high=education_num>10
#high=education_num[high]
print(high)
low=education_num<=10
#low=education_num[low]
print(low)
INCOME=cenus[:,7][high]
print(INCOME)
print(np.mean(INCOME))
avg_pay_high=round(np.mean(INCOME),2)
print(avg_pay_high)
LOW_AVG=cenus[:,7][low]
avg_pay_low=round(np.mean(LOW_AVG),2)
print(avg_pay_low)
#Code starts here
| [((6, 0, 6, 33), 'warnings.filterwarnings', 'warnings.filterwarnings', ({(6, 24, 6, 32): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((12, 7, 12, 56), 'numpy.genfromtxt', 'np.genfromtxt', (), '', True, 'import numpy as np\n'), ((14, 6, 14, 46), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((21, 9, 21, 21), 'numpy.mean', 'np.mean', ({(21, 17, 21, 20): 'age'}, {}), '(age)', True, 'import numpy as np\n'), ((22, 8, 22, 19), 'numpy.std', 'np.std', ({(22, 15, 22, 18): 'age'}, {}), '(age)', True, 'import numpy as np\n'), ((56, 6, 56, 21), 'numpy.mean', 'np.mean', ({(56, 14, 56, 20): 'INCOME'}, {}), '(INCOME)', True, 'import numpy as np\n'), ((57, 19, 57, 34), 'numpy.mean', 'np.mean', ({(57, 27, 57, 33): 'INCOME'}, {}), '(INCOME)', True, 'import numpy as np\n'), ((60, 18, 60, 34), 'numpy.mean', 'np.mean', ({(60, 26, 60, 33): 'LOW_AVG'}, {}), '(LOW_AVG)', True, 'import numpy as np\n')] |
jchampio/apache-websocket | test/present.py | 18ad4ae2fc99381b8d75785f492a479f789b322b | #! /usr/bin/env python
#
# Presents the results of an Autobahn TestSuite run in TAP format.
#
# Copyright 2015 Jacob Champion
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import StrictVersion
import json
import os.path
import sys
import textwrap
import yamlish
def filter_report(report):
"""Filters a test report dict down to only the interesting keys."""
INTERESTING_KEYS = [
'behavior',
'behaviorClose',
'expected',
'received',
'expectedClose',
'remoteCloseCode'
]
return { key: report[key] for key in INTERESTING_KEYS }
def prepare_description(report):
"""Constructs a description from a test report."""
raw = report['description']
# Wrap to at most 80 characters.
wrapped = textwrap.wrap(raw, 80)
description = wrapped[0]
if len(wrapped) > 1:
# If the text is longer than one line, add an ellipsis.
description += '...'
return description
#
# MAIN
#
# Read the index.
results_dir = 'test-results'
with open(os.path.join(results_dir, 'index.json'), 'r') as index_file:
index = json.load(index_file)['AutobahnPython']
# Sort the tests by numeric ID so we print them in a sane order.
test_ids = list(index.keys())
test_ids.sort(key=StrictVersion)
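# StrictVersion gives numeric ordering, so e.g. '1.10.1' sorts after '1.9.1',
# which a plain string sort would not do.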
# Print the TAP header.
print('TAP version 13')
print('1..{0!s}'.format(len(test_ids)))
count = 0
skipped_count = 0
failed_count = 0
for test_id in test_ids:
count += 1
passed = True
skipped = False
report = None
result = index[test_id]
# Try to get additional information from this test's report file.
try:
path = os.path.join(results_dir, result['reportfile'])
with open(path, 'r') as f:
report = json.load(f)
description = prepare_description(report)
except Exception as e:
description = '[could not load report file: {0!s}]'.format(e)
test_result = result['behavior']
close_result = result['behaviorClose']
# Interpret the result for this test.
if test_result != 'OK' and test_result != 'INFORMATIONAL':
if test_result == 'UNIMPLEMENTED':
skipped = True
else:
passed = False
elif close_result != 'OK' and close_result != 'INFORMATIONAL':
passed = False
# Print the TAP result.
print(u'{0} {1} - [{2}] {3}{4}'.format('ok' if passed else 'not ok',
count,
test_id,
description,
' # SKIP unimplemented' if skipped
else ''))
# Print a YAMLish diagnostic for failed tests.
if report and not passed:
output = filter_report(report)
diagnostic = yamlish.dumps(output)
for line in diagnostic.splitlines():
print(' ' + line)
if not passed:
failed_count += 1
if skipped:
skipped_count += 1
# Print a final result.
print('# Autobahn|TestSuite {0}'.format('PASSED' if not failed_count else 'FAILED'))
print('# total {0}'.format(count))
print('# passed {0}'.format(count - failed_count - skipped_count))
print('# skipped {0}'.format(skipped_count))
print('# failed {0}'.format(failed_count))
exit(0 if not failed_count else 1)
| [((46, 14, 46, 36), 'textwrap.wrap', 'textwrap.wrap', ({(46, 28, 46, 31): 'raw', (46, 33, 46, 35): '80'}, {}), '(raw, 80)', False, 'import textwrap\n'), ((62, 12, 62, 33), 'json.load', 'json.load', ({(62, 22, 62, 32): 'index_file'}, {}), '(index_file)', False, 'import json\n'), ((118, 21, 118, 42), 'yamlish.dumps', 'yamlish.dumps', ({(118, 35, 118, 41): 'output'}, {}), '(output)', False, 'import yamlish\n'), ((88, 21, 88, 33), 'json.load', 'json.load', ({(88, 31, 88, 32): 'f'}, {}), '(f)', False, 'import json\n')] |
WEBZCC/softwarecollections | softwarecollections/scls/migrations/0004_other_repos_default_values.py | efee5c3c276033d526a0cdba504d43deff71581e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scls', '0003_other_repos'),
]
operations = [
migrations.AlterField(
model_name='otherrepo',
name='arch',
field=models.CharField(default='', blank=True, verbose_name='Architecture', max_length=20),
),
migrations.AlterField(
model_name='otherrepo',
name='command',
field=models.TextField(default='', blank=True, verbose_name='Command'),
),
migrations.AlterField(
model_name='otherrepo',
name='icon',
field=models.CharField(default='', blank=True, verbose_name='Icon', choices=[('centos', 'centos'), ('epel', 'epel'), ('fedora', 'fedora'), ('rhel', 'rhel')], max_length=20),
),
migrations.AlterField(
model_name='otherrepo',
name='version',
field=models.CharField(default='', blank=True, verbose_name='Distribution version', max_length=20),
),
]
| [((17, 18, 17, 102), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((22, 18, 22, 82), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((27, 18, 27, 184), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((32, 18, 32, 110), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')] |
davidgjy/arch-lib | python/Excel/enumerateCells.py | b4402b96d2540995a848e6c5f600b2d99847ded6 | import openpyxl
wb = openpyxl.load_workbook('example.xlsx')
sheet = wb.get_sheet_by_name('Sheet1')
rows = sheet.get_highest_row()
cols = sheet.get_highest_column()
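# Note: newer openpyxl releases replace get_sheet_by_name()/get_highest_row()/
# get_highest_column() with wb['Sheet1'], sheet.max_row and sheet.max_column.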
for i in range(1, rows + 1):
for j in range(1, cols + 1):
print('%s: %s' % (sheet.cell(row=i, column=j).coordinate, sheet.cell(row=i, column=j).value))
print('---------------------------------------------')
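# Rough modern-openpyxl equivalent (an assumption, not part of the original
# script: it presumes a newer openpyxl release in which the get_* helpers used
# above are deprecated):
# sheet = wb['Sheet1']
# for i in range(1, sheet.max_row + 1):
#   for j in range(1, sheet.max_column + 1):
#     print('%s: %s' % (sheet.cell(row=i, column=j).coordinate, sheet.cell(row=i, column=j).value))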
| [((3, 5, 3, 43), 'openpyxl.load_workbook', 'openpyxl.load_workbook', ({(3, 28, 3, 42): '"""example.xlsx"""'}, {}), "('example.xlsx')", False, 'import openpyxl\n')] |
BLSQ/iaso-copy | plugins/polio/migrations/0029_campaign_country.py | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | # Generated by Django 3.1.13 on 2021-10-04 11:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("iaso", "0107_auto_20211001_1845"),
("polio", "0028_remove_campaign_budget_first_draft_submitted_at"),
]
operations = [
migrations.AddField(
model_name="campaign",
name="country",
field=models.ForeignKey(
blank=True,
help_text="Country for campaign, set automatically from initial_org_unit",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="campaigns_country",
to="iaso.orgunit",
),
),
]
| [((18, 18, 25, 13), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
aarana14/CurrencyExchange | CurrencyExchange.py | e3f35c1481acf19683a74a41509b1dd37ae48594 | #import external libraries used in code
import requests, json
import pycountry
print('Currency Exchange')
currencies = []
def findCurrency():
    #Finds all available currencies
allCurrency = (list(pycountry.currencies))
for x in allCurrency:
y = str(x)
y = y[18:21]
#Adds the value of their ISO to the "currencies" list
currencies.append(y)
#Organizes all values in "currency" list
currecyDisplay = ''
inline = 0
for cs in currencies:
currecyDisplay += cs + ' | '
inline += 1
#Allows up to 26 ISOs to be in one line
if inline >= 26:
currecyDisplay += '\n '
inline = 0
#Displays all currency ISOs to user
    print('Available Currencies:\n',currecyDisplay)
def help():
#Ask user if they need help
questions = input('Type ? for help or Enter to continue: ')
#If user inputs "?" run help procedure
if questions == '?':
#Display information order
print('--------\nCurrency Exchange Help\nISO currency codes are three-letter alphabetic codes that represent the various currencies\n\nCurrency ISO:\nCurrency Name:\n--------')
#Obtains information of all currencies
allCurrency = (list(pycountry.currencies))
#For each currency obtain the ISO and the name of currency
#Display ISO and Data
for x in allCurrency:
y = str(x)
w = y[18:21]
n = int(y.index(',', y.index(',') + 1))
z = y[30:n-1]
print(w)
print(z + '\n')
print('--------\n')
#Else user does not input "?" continue program
else:
pass
def userInput():
    #Try to read the user's input
try:
fromCurrency = input('From (ISO): ').upper()
toCurrency = input('To (ISO): ').upper()
currencyAmount = input('Amount: ')
currencyAmount = int(currencyAmount.replace(',', ''))
    #If the entered data is not the correct type, inform the user
    except ValueError:
        print('Amount must be a number')
    #Return the entered data
return currencyAmount, fromCurrency, toCurrency
def checkInfo(fromC, toC, currencyA, check):
#"validCurrency" value increses as data inputed if verified
validCurrency = 0
    #Check if the entered ISO codes are valid
    #If the values are valid, the value of "validCurrency" is increased
for givenCurrencies in currencies:
if fromC == givenCurrencies:
validCurrency += 1
for givenCurrencies in currencies:
if toC == givenCurrencies:
validCurrency += 1
#Check if "validCurrency" meets necessary verification value
#Check if "validCurrency" is not 2 (Data is not valid) or inputed amount data is not the correct value
if validCurrency != 2 or type(currencyA) != int:
#Let user know data is invalid
print('Information Invalid\n')
#Ask user if they need help
help()
#Reset "validCurrency"
validCurrency = 0
#Set "check" as False
checks = False
#If type of data is correct and valid "check" is set to True
else:
checks = True
return fromC, toC, currencyA, checks
def dataInput():
#Data has not been checked yet, therefore "check" is False
check = False
#While the data is not valid or not checked repeat data input and data check
while check == False:
currencyAmount, fromCurrency, toCurrency = userInput()
fromC, toC, currencyA, check = checkInfo(fromCurrency, toCurrency, currencyAmount, check)
#Once data is valid and checked return values
return fromC, toC, currencyA
def userData():
#No data if the information provided is correct
correctInfo = ''
#While the user does not approve of data, repeat data input and data check
while correctInfo != 'y':
fromC, toC, currencyA = dataInput()
        #Display the entered data after it has been checked and validated
print('\nFrom:',fromC)
print('To:',toC)
print('Amount:', currencyA)
#Ask user if the data provided is correct
correctInfo = input('Is the information correct (y/n)?: ').lower()
print('')
help()
#Once data is approved by user, return values
return currencyA, fromC, toC
def realTimeRate(from_currency, to_currency):
    #API key provided by Alpha Vantage
api_key = "1RU6IZY5D9UIISJK"
#Define "url" where data is stored
#"url" varies from user selected data
url = ('https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=%s&to_currency=%s&apikey=%s' % (from_currency, to_currency, api_key))
    #Get the response from requesting "url"
req = requests.get(url)
#Obtain json format and set data for python to read
#"Result" has nested dictionaries
result = req.json()
#Display exchange rate information to user
print("Realtime Currency Exchange Rate for",
result["Realtime Currency Exchange Rate"]
["2. From_Currency Name"], "to",
result["Realtime Currency Exchange Rate"]
["4. To_Currency Name"], "is",
result["Realtime Currency Exchange Rate"]
['5. Exchange Rate'], to_currency)
#Return the value of exchange
return float(result["Realtime Currency Exchange Rate"]
['5. Exchange Rate'])
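#For reference (shape inferred from the keys accessed above; the values shown are made up):
#{
#  "Realtime Currency Exchange Rate": {
#    "2. From_Currency Name": "United States Dollar",
#    "4. To_Currency Name": "Euro",
#    "5. Exchange Rate": "0.91000000"
#  }
#}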
def completeExchange(rate, cAmount, fCurrency, tCurrency):
#Total of the "to" currency is the rate times the amount of the "from" currency
total = rate * cAmount
end = ' '
    #Keep the program running until the user presses the Enter key
while end == ' ':
print('\n%s %s is %.2f %s' % (cAmount, fCurrency, total, tCurrency))
end = input('Press Enter To Close')
if __name__ == "__main__":
findCurrency()
help()
currencyAmount, fromCurrency, toCurrency = userData()
rate = realTimeRate(fromCurrency, toCurrency)
completeExchange(rate, currencyAmount, fromCurrency, toCurrency)
| [((127, 10, 127, 27), 'requests.get', 'requests.get', ({(127, 23, 127, 26): 'url'}, {}), '(url)', False, 'import requests, json\n')] |
knuu/competitive-programming | atcoder/corp/codethxfes2014a_e.py | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | r, c, m = map(int, input().split())
n = int(input())
op = [list(map(lambda x: int(x) - 1, input().split())) for _ in range(n)]
board = [[0 for _ in range(c)] for _ in range(r)]
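# Each of the n operations adds 1 to every cell of a sub-rectangle (the inputs are
# converted to 0-based indices above).  After reducing every cell mod 4, "cnt" holds
# the number of cells equal to 0; the last loop then reports every operation whose
# single removal would leave exactly m such cells.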
for ra, rb, ca, cb in op:
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
board[j][k] += 1
cnt = 0
for i in range(r):
for j in range(c):
board[i][j] %= 4
if board[i][j] == 0:
cnt += 1
for i in range(n):
ra, rb, ca, cb = op[i]
cnti = cnt
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
if board[j][k] == 0:
cnti -= 1
elif board[j][k] == 1:
cnti += 1
if cnti == m:
print(i + 1)
| [] |
QU-XIAO/yambopy | scripts/analyse_bse.py | ff65a4f90c1bfefe642ebc61e490efe781709ff9 | # Copyright (C) 2018 Alexandre Morlet, Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
from __future__ import print_function
from builtins import range
from yambopy import *
from qepy import *
import os
import json
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
import operator
def analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack, text, draw ):
"""
Using ypp, you can study the convergence of BSE calculations in 2 ways:
Create a .png of all absorption spectra relevant to the variable you study
Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity)
The script reads from <folder> all results from <variable> calculations for processing.
    The resulting pictures and data files are saved in the ./analyse_<folder>/ folder.
By default, the graphical interface is deactivated (assuming you run on a cluster because of ypp calls).
See line 2 inside the script.
"""
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if pack: # True by default, False if -np used
print('Packing ...')
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
print('Packing done.')
else:
print('Packing skipped.')
# importing data from .json files in <folder>
print('Importing...')
data = YamboAnalyser(folder)
# extract data according to relevant var
invars = data.get_inputfiles_tag(var)
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(list(invars.items()), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
print('Files detected: ',keys)
# unit of the input value
unit = invars[keys[0]]['variables'][var][1]
######################
# Output-file filename
######################
    os.system('mkdir -p analyse_%s'%folder)
outname = './analyse_%s/%s_%s'%(folder,folder,var)
# Array that will contain the output
excitons = []
# Loop over all calculations
for key in keys:
jobname=key.replace('.json','')
print(jobname)
# input value
# BndsRn__ is a special case
if var.startswith('BndsRnX'):
# format : [1, nband, ...]
inp = invars[key]['variables'][var][0][1]
else:
inp = invars[key]['variables'][var][0]
print('Preparing JSON file. Calling ypp if necessary.')
### Creating the 'absorptionspectra.json' file
# It will contain the exciton energies
y = YamboOut(folder=folder,save_folder=folder)
# Args : name of job, SAVE folder path, folder where job was run path
a = YamboBSEAbsorptionSpectra(jobname,path=folder)
# Get excitons values (runs ypp once)
a.get_excitons(min_intensity=exc_int,max_energy=exc_max_E,Degen_Step=exc_degen)
# Write .json file with spectra and eigenenergies
a.write_json(filename=outname)
### Loading data from .json file
f = open(outname+'.json')
data = json.load(f)
f.close()
print('JSON file prepared and loaded.')
### Plotting the absorption spectra
# BSE spectra
plt.plot(data['E/ev[1]'], data['EPS-Im[2]'],label=jobname,lw=2)
# # Axes : lines for exciton energies (disabled, would make a mess)
# for n,exciton in enumerate(data['excitons']):
# plt.axvline(exciton['energy'])
### Creating array with exciton values (according to settings)
l = [inp]
for n,exciton in enumerate(data['excitons']):
if n <= exc_n-1:
l.append(exciton['energy'])
excitons.append(l)
if text:
header = 'Columns : '+var+' (in '+unit+') and "bright" excitons eigenenergies in order.'
print(excitons)
np.savetxt(outname+'.dat',excitons,header=header)
#np.savetxt(outname,excitons,header=header,fmt='%1f')
print(outname+'.dat')
else:
print('-nt flag : no text produced.')
if draw:
plt.xlabel('$\omega$ (eV)')
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.legend()
#plt.draw()
#plt.show()
plt.savefig(outname+'.png', bbox_inches='tight')
print(outname+'.png')
else:
print('-nd flag : no plot produced.')
print('Done.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Study convergence on BS calculations using ypp calls.')
pa = parser.add_argument
pa('folder', help='Folder containing SAVE and convergence runs.' )
pa('variable', help='Variable tested (e.g. FFTGvecs)' )
pa('-ne','--numbexc', help='Number of excitons to read beyond threshold', default=2,type=int)
pa('-ie','--intexc', help='Minimum intensity for excitons to be considered bright', default=0.05,type=float)
pa('-de','--degenexc', help='Energy threshold under which different peaks are merged (eV)', default=0.01,type=float)
pa('-me','--maxexc', help='Energy threshold after which excitons are not read anymore (eV)', default=8.0,type=float)
pa('-np','--nopack', help='Skips packing o- files into .json files', action='store_false')
pa('-nt','--notext', help='Skips writing the .dat file', action='store_false')
pa('-nd','--nodraw', help='Skips drawing (plotting) the abs spectra', action='store_false')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
folder = args.folder
var = args.variable
exc_n = args.numbexc
exc_int = args.intexc
exc_degen = args.degenexc
exc_max_E = args.maxexc
pack = args.nopack
    text = args.notext
    draw = args.nodraw
analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack=pack, text=text, draw=draw )
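# Example invocation (illustrative; the folder name and threshold values are placeholders):
#   python analyse_bse.py bse_convergence FFTGvecs -ne 3 -ie 0.1
# This packs the o-* files found in ./bse_convergence, plots the absorption spectrum of
# every FFTGvecs run plus the reference, and writes the exciton energies and the figure
# under ./analyse_bse_convergence/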
| [((138, 13, 138, 105), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((95, 15, 95, 27), 'json.load', 'json.load', ({(95, 25, 95, 26): 'f'}, {}), '(f)', False, 'import json\n'), ((101, 8, 101, 71), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((117, 8, 117, 57), 'numpy.savetxt', 'np.savetxt', (), '', True, 'import numpy as np\n'), ((124, 8, 124, 35), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(124, 19, 124, 34): '"""$\\\\omega$ (eV)"""'}, {}), "('$\\\\omega$ (eV)')", True, 'import matplotlib.pyplot as plt\n'), ((126, 8, 126, 20), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((129, 8, 129, 56), 'matplotlib.pyplot.savefig', 'plt.savefig', (), '', True, 'import matplotlib.pyplot as plt\n'), ((152, 8, 152, 19), 'sys.exit', 'sys.exit', ({(152, 17, 152, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((49, 53, 49, 75), 'operator.itemgetter', 'operator.itemgetter', ({(49, 73, 49, 74): '1'}, {}), '(1)', False, 'import operator\n'), ((125, 42, 125, 59), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((125, 8, 125, 17), 'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n')] |
richteer/pyfatafl | halmodule.py | 1faddcf5d9eb36cbc6952b9a8e8bb899989f7112 | from module import XMPPModule
import halutils
import pyfatafl as hnefatafl
class Game():
    players = []
    xmpp = None
    b = None
    turn = ""
    mod = None
def __init__(self, mod, p1, p2):
self.players = [p1, p2]
self.mod = mod
self.xmpp = mod.xmpp
self.xmpp.sendMsg(p2, "You have been challenged to play Hnefatafl by {}, reply with '!hnefatafl accept' to begin!".format(p1))
    def begin(self):
# Send initial board state
self.b = hnefatafl.Board()
self.turn = False # For now, make the challenger be first
self._sendBoard()
    def _sendBoard(self):
        for i in self.players:
            self.xmpp.sendMsg(i, self.b.getPtBoard() + "\n\n" + "It is '{}''s ({}) turn".format(self.players[self.turn], "white" if self.turn else "black"))
    def msg(self, player, string):
        if player != self.players[self.turn]:
            self.xmpp.sendMsg(player, "Sorry, it is not your turn!")
            return
m = hnefatafl.Move()
string = "{} {}".format("w" if self.turn else "b", string)
try:
m.parse(string, self.b)
        except Exception:
            self.xmpp.sendMsg(player, "Invalid move format, see !help hnefatafl")
            return
try:
self.b.move(m)
self._sendBoard()
except Exception as e: # TODO: Have been errors
self.xmpp.sendMsg(player, str(e))
        if self.b.over:
for i in self.players:
self.xmpp.sendMsg(i, "Game over! {} wins!".format(self.b.over))
del self.mod.sessions[i]
# Commented to avoid loading before its ready
class Hnefatafl(XMPPModule):
sessions = {}
def recvMsg(self, msg):
cmd, args = halutils.splitArgList(msg)
if cmd == "!hnefatafl":
if args[0] == "challenge":
                if len(args) != 2:
                    self.xmpp.reply(msg, "Need the JID of a target")
                    return
                elif args[1] == msg['from'].bare:
                    self.xmpp.reply(msg, "You can't challenge yourself...")
                    return
# TODO: Validate JID here
g = Game(self, msg['from'].bare, args[1])
                self.sessions[msg['from'].bare] = g
self.sessions[args[1]] = g
self.xmpp.reply(msg, "Challenge sent!")
elif args[0] == "accept":
if msg['from'].bare not in self.sessions:
self.xmpp.reply(msg, "You have not been challenged!")
return
self.sessions[msg['from'].bare].begin()
elif args[0] == "surrender":
if msg['from'].bare not in self.sessions:
self.xmpp.reply(msg, "You aren't currently in a session")
return
for p in [p for p in self.sessions[msg['from'].bare].players]:
del self.sessions[p]
        elif msg['from'].bare in self.sessions:
self.sessions[msg['from'].bare].msg(msg['from'].bare, msg['body'])
def help(self, string):
if string in ["!hnefatafl", "hnefatafl"]:
return '''
usage: !hnefatafl <command> [arg]
Commands:
challenge <jid> - Send a challenge to JID
accept - Accept a challenge from JID, and begin game
surrender - Surrender the game
'''
return '''
Hnefatafl by XMPP! Play a game against someone through this bot.
Features:
!hnefatafl - Command to challenge, accept, and surrender games
Note: This module will ignore any MUC messages, or other indirect messages
Another Note: This will likely be unplayable if not using a monospace font :)
'''
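# Illustrative session (not part of the original module; the JIDs are made up):
#   alice@example.org -> bot:   !hnefatafl challenge bob@example.org
#   bot -> bob@example.org:     "You have been challenged to play Hnefatafl by alice@example.org, ..."
#   bob@example.org -> bot:     !hnefatafl accept
#   bot -> both players:        the ASCII board plus a line saying whose turn it is
#   alice@example.org -> bot:   a move in whatever format pyfatafl's Move.parse accepts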
| [] |
arkhipenko/AceTime | tools/acetz.py | bc6e6aa530e309b62a204b7574322ba013066b06 | from typing import cast, Optional
from datetime import datetime, tzinfo, timedelta
from zonedbpy import zone_infos
from zone_processor.zone_specifier import ZoneSpecifier
from zone_processor.inline_zone_info import ZoneInfo
__version__ = '1.1'
class acetz(tzinfo):
"""An implementation of datetime.tzinfo using the ZoneSpecifier class
from AceTime/tools.
"""
def __init__(self, zone_info: ZoneInfo):
self.zone_info = zone_info
self.zs = ZoneSpecifier(zone_info, use_python_transition=True)
def utcoffset(self, dt: Optional[datetime]) -> timedelta:
assert dt
self.zs.init_for_year(dt.year)
offset_info = self.zs.get_timezone_info_for_datetime(dt)
if not offset_info:
raise Exception(
f'Unknown timezone info for '
f'{dt.year:04}-{dt.month:02}-{dt.day:02} '
f'{dt.hour:02}:{dt.minute:02}:{dt.second:02}'
)
return timedelta(seconds=offset_info.total_offset)
def dst(self, dt: Optional[datetime]) -> timedelta:
assert dt
self.zs.init_for_year(dt.year)
offset_info = self.zs.get_timezone_info_for_datetime(dt)
if not offset_info:
raise Exception(
f'Unknown timezone info for '
f'{dt.year:04}-{dt.month:02}-{dt.day:02} '
f'{dt.hour:02}:{dt.minute:02}:{dt.second:02}'
)
return timedelta(seconds=offset_info.dst_offset)
def tzname(self, dt: Optional[datetime]) -> str:
assert dt
self.zs.init_for_year(dt.year)
offset_info = self.zs.get_timezone_info_for_datetime(dt)
if not offset_info:
raise Exception(
f'Unknown timezone info for '
f'{dt.year:04}-{dt.month:02}-{dt.day:02} '
f'{dt.hour:02}:{dt.minute:02}:{dt.second:02}'
)
return offset_info.abbrev
def zone_specifier(self) -> ZoneSpecifier:
return self.zs
def gettz(zone_name: str) -> acetz:
zone_info = cast(ZoneInfo, zone_infos.ZONE_INFO_MAP.get(zone_name))
if not zone_info:
raise Exception(f"Zone '{zone_name}' not found")
return acetz(zone_info)
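# Minimal usage sketch (illustrative, not part of the original module; it assumes the
# zonedbpy database shipped with AceTime contains the named zone):
if __name__ == '__main__':
    tz = gettz('America/Los_Angeles')
    dt = datetime(2000, 6, 1, 12, 0, 0, tzinfo=tz)
    print(dt.utcoffset(), dt.dst(), dt.tzname())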
| [((17, 18, 17, 70), 'zone_processor.zone_specifier.ZoneSpecifier', 'ZoneSpecifier', (), '', False, 'from zone_processor.zone_specifier import ZoneSpecifier\n'), ((29, 15, 29, 58), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, tzinfo, timedelta\n'), ((41, 15, 41, 56), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, tzinfo, timedelta\n'), ((60, 31, 60, 70), 'zonedbpy.zone_infos.ZONE_INFO_MAP.get', 'zone_infos.ZONE_INFO_MAP.get', ({(60, 60, 60, 69): 'zone_name'}, {}), '(zone_name)', False, 'from zonedbpy import zone_infos\n')] |
kozakusek/ipp-2020-testy | z2/part2/interactive/jm/random_fuzzy_arrows_1/554539540.py | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 554539540
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 8, 3, 17)
assert board is not None
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_move(board, 2, 1, 7) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_golden_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 1, 3) == 1
assert gamma_move(board, 1, 3, 5) == 1
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 5, 4) == 1
assert gamma_move(board, 3, 0, 4) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 1, 4) == 1
assert gamma_move(board, 2, 1, 6) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 1, 4, 2) == 1
board251673140 = gamma_board(board)
assert board251673140 is not None
assert board251673140 == (".2....\n"
".2....\n"
"...1..\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
".1...2\n"
".3....\n")
del board251673140
board251673140 = None
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 3, 4, 5) == 1
assert gamma_move(board, 3, 3, 0) == 1
assert gamma_free_fields(board, 3) == 29
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 3, 0, 5) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 0, 7) == 1
board281476409 = gamma_board(board)
assert board281476409 is not None
assert board281476409 == ("12....\n"
".2....\n"
"3..13.\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
"31...2\n"
".3.3..\n")
del board281476409
board281476409 = None
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_busy_fields(board, 3) == 8
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 2, 4, 4) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 2, 1) == 1
board412285252 = gamma_board(board)
assert board412285252 is not None
assert board412285252 == ("12....\n"
".2....\n"
"3..13.\n"
"32..22\n"
"131.1.\n"
"113.1.\n"
"311..2\n"
"13.3..\n")
del board412285252
board412285252 = None
assert gamma_move(board, 2, 1, 6) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_free_fields(board, 3) == 23
assert gamma_golden_move(board, 3, 4, 4) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 3, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_free_fields(board, 2) == 22
assert gamma_move(board, 3, 5, 5) == 1
assert gamma_move(board, 3, 5, 5) == 0
assert gamma_free_fields(board, 3) == 21
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 5, 7) == 1
assert gamma_move(board, 2, 0, 6) == 1
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 1, 5, 2) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_move(board, 1, 5, 1) == 0
assert gamma_free_fields(board, 1) == 16
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 3, 4, 1) == 1
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 1, 5) == 1
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 1, 2, 4) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_busy_fields(board, 1) == 16
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 0, 6) == 0
assert gamma_move(board, 2, 5, 5) == 0
assert gamma_golden_move(board, 2, 2, 2) == 1
assert gamma_move(board, 1, 5, 5) == 0
assert gamma_free_fields(board, 1) == 13
assert gamma_move(board, 2, 2, 6) == 1
assert gamma_move(board, 2, 5, 6) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 2, 7, 3) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_move(board, 1, 7, 2) == 0
board481507094 = gamma_board(board)
assert board481507094 is not None
assert board481507094 == ("12...1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board481507094
board481507094 = None
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_golden_possible(board, 3) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 6, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 7) == 1
board984249076 = gamma_board(board)
assert board984249076 is not None
assert board984249076 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board984249076
board984249076 = None
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_golden_possible(board, 1) == 1
board492321582 = gamma_board(board)
assert board492321582 is not None
assert board492321582 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board492321582
board492321582 = None
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 5, 6) == 0
assert gamma_move(board, 3, 2, 1) == 0
gamma_delete(board)
| [((19, 8, 19, 30), 'part1.gamma_new', 'gamma_new', ({(19, 18, 19, 19): '6', (19, 21, 19, 22): '8', (19, 24, 19, 25): '3', (19, 27, 19, 29): '17'}, {}), '(6, 8, 3, 17)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((53, 17, 53, 35), 'part1.gamma_board', 'gamma_board', ({(53, 29, 53, 34): 'board'}, {}), '(board)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((79, 17, 79, 35), 'part1.gamma_board', 'gamma_board', ({(79, 29, 79, 34): 'board'}, {}), '(board)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((108, 17, 108, 35), 'part1.gamma_board', 'gamma_board', ({(108, 29, 108, 34): 'board'}, {}), '(board)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((191, 17, 191, 35), 'part1.gamma_board', 'gamma_board', ({(191, 29, 191, 34): 'board'}, {}), '(board)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((226, 17, 226, 35), 'part1.gamma_board', 'gamma_board', ({(226, 29, 226, 34): 'board'}, {}), '(board)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((242, 17, 242, 35), 'part1.gamma_board', 'gamma_board', ({(242, 29, 242, 34): 'board'}, {}), '(board)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((276, 0, 276, 19), 'part1.gamma_delete', 'gamma_delete', ({(276, 13, 276, 18): 'board'}, {}), '(board)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((23, 7, 23, 33), 'part1.gamma_move', 'gamma_move', ({(23, 18, 23, 23): 'board', (23, 25, 23, 26): '(1)', (23, 28, 23, 29): '(7)', (23, 31, 23, 32): '(4)'}, {}), '(board, 1, 7, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((24, 7, 24, 33), 'part1.gamma_move', 'gamma_move', ({(24, 18, 24, 23): 'board', (24, 25, 24, 26): '(1)', (24, 28, 24, 29): '(4)', (24, 31, 24, 32): '(3)'}, {}), '(board, 1, 4, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((25, 7, 25, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(25, 25, 25, 30): 'board', (25, 32, 25, 33): '(1)'}, {}), '(board, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((26, 7, 26, 33), 'part1.gamma_move', 'gamma_move', ({(26, 18, 26, 23): 'board', (26, 25, 26, 26): '(2)', (26, 28, 26, 29): '(5)', (26, 31, 26, 32): '(1)'}, {}), '(board, 2, 5, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((27, 7, 27, 33), 'part1.gamma_move', 
'gamma_move', ({(27, 18, 27, 23): 'board', (27, 25, 27, 26): '(2)', (27, 28, 27, 29): '(1)', (27, 31, 27, 32): '(7)'}, {}), '(board, 2, 1, 7)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((28, 7, 28, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(28, 25, 28, 30): 'board', (28, 32, 28, 33): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((29, 7, 29, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(29, 29, 29, 34): 'board', (29, 36, 29, 37): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((30, 7, 30, 33), 'part1.gamma_move', 'gamma_move', ({(30, 18, 30, 23): 'board', (30, 25, 30, 26): '(3)', (30, 28, 30, 29): '(1)', (30, 31, 30, 32): '(0)'}, {}), '(board, 3, 1, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((31, 7, 31, 40), 'part1.gamma_golden_move', 'gamma_golden_move', ({(31, 25, 31, 30): 'board', (31, 32, 31, 33): '(3)', (31, 35, 31, 36): '(3)', (31, 38, 31, 39): '(4)'}, {}), '(board, 3, 3, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((32, 7, 32, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(32, 25, 32, 30): 'board', (32, 32, 32, 33): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((33, 7, 33, 33), 'part1.gamma_move', 'gamma_move', ({(33, 18, 33, 23): 'board', (33, 25, 33, 26): '(3)', (33, 28, 33, 29): '(1)', (33, 31, 33, 32): '(3)'}, {}), '(board, 3, 1, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((34, 7, 34, 33), 'part1.gamma_move', 'gamma_move', ({(34, 18, 34, 23): 'board', (34, 25, 34, 26): '(1)', (34, 28, 34, 29): '(3)', (34, 31, 34, 32): '(5)'}, {}), '(board, 1, 3, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((35, 7, 35, 33), 'part1.gamma_move', 'gamma_move', ({(35, 18, 35, 23): 'board', (35, 25, 35, 26): '(1)', (35, 28, 35, 29): '(2)', (35, 31, 35, 32): '(3)'}, {}), '(board, 1, 2, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((36, 7, 36, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(36, 29, 36, 34): 'board', (36, 36, 36, 37): '(1)'}, {}), '(board, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((37, 7, 37, 33), 'part1.gamma_move', 'gamma_move', ({(37, 18, 37, 23): 'board', (37, 25, 37, 26): '(2)', (37, 28, 37, 29): '(1)', (37, 31, 37, 32): '(0)'}, {}), '(board, 2, 1, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((38, 7, 38, 33), 'part1.gamma_move', 'gamma_move', ({(38, 18, 38, 23): 'board', (38, 25, 38, 26): '(3)', (38, 28, 38, 29): '(2)', (38, 31, 38, 32): '(2)'}, {}), '(board, 3, 2, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((39, 7, 39, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(39, 29, 39, 34): 'board', (39, 36, 39, 37): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((40, 7, 40, 33), 'part1.gamma_move', 'gamma_move', ({(40, 18, 40, 23): 'board', (40, 25, 40, 26): '(1)', (40, 28, 40, 29): '(0)', (40, 31, 40, 32): '(2)'}, {}), '(board, 1, 0, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((41, 7, 41, 33), 'part1.gamma_move', 'gamma_move', ({(41, 18, 41, 23): 'board', (41, 25, 41, 26): '(1)', (41, 28, 41, 29): '(1)', (41, 31, 41, 32): '(1)'}, {}), '(board, 1, 1, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((42, 7, 42, 33), 'part1.gamma_move', 'gamma_move', ({(42, 18, 42, 23): 'board', (42, 25, 42, 26): '(2)', (42, 28, 42, 29): '(5)', (42, 31, 42, 32): '(4)'}, {}), '(board, 2, 5, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((43, 7, 43, 33), 'part1.gamma_move', 'gamma_move', ({(43, 18, 43, 23): 'board', (43, 25, 43, 26): '(3)', (43, 28, 43, 29): '(0)', (43, 31, 43, 32): '(4)'}, {}), '(board, 3, 0, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((44, 7, 44, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(44, 29, 44, 34): 'board', (44, 36, 44, 37): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((45, 7, 45, 33), 'part1.gamma_move', 'gamma_move', ({(45, 18, 45, 23): 'board', (45, 25, 45, 26): '(1)', (45, 28, 45, 29): '(1)', (45, 31, 45, 32): '(2)'}, {}), '(board, 1, 1, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((46, 7, 46, 33), 'part1.gamma_move', 'gamma_move', ({(46, 18, 46, 23): 'board', (46, 25, 46, 26): '(2)', (46, 28, 46, 29): '(1)', (46, 31, 46, 32): '(4)'}, {}), '(board, 2, 1, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((47, 7, 47, 33), 'part1.gamma_move', 'gamma_move', ({(47, 18, 47, 23): 'board', (47, 25, 47, 26): '(2)', (47, 28, 47, 29): '(1)', (47, 31, 47, 32): '(6)'}, {}), '(board, 2, 1, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((48, 7, 48, 33), 'part1.gamma_move', 'gamma_move', ({(48, 18, 48, 23): 'board', (48, 25, 48, 26): '(3)', (48, 28, 48, 29): '(1)', (48, 31, 48, 32): 
'(2)'}, {}), '(board, 3, 1, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((49, 7, 49, 33), 'part1.gamma_move', 'gamma_move', ({(49, 18, 49, 23): 'board', (49, 25, 49, 26): '(1)', (49, 28, 49, 29): '(0)', (49, 31, 49, 32): '(3)'}, {}), '(board, 1, 0, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((50, 7, 50, 33), 'part1.gamma_move', 'gamma_move', ({(50, 18, 50, 23): 'board', (50, 25, 50, 26): '(1)', (50, 28, 50, 29): '(4)', (50, 31, 50, 32): '(2)'}, {}), '(board, 1, 4, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((65, 7, 65, 33), 'part1.gamma_move', 'gamma_move', ({(65, 18, 65, 23): 'board', (65, 25, 65, 26): '(2)', (65, 28, 65, 29): '(4)', (65, 31, 65, 32): '(3)'}, {}), '(board, 2, 4, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((66, 7, 66, 33), 'part1.gamma_move', 'gamma_move', ({(66, 18, 66, 23): 'board', (66, 25, 66, 26): '(2)', (66, 28, 66, 29): '(5)', (66, 31, 66, 32): '(1)'}, {}), '(board, 2, 5, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((67, 7, 67, 33), 'part1.gamma_move', 'gamma_move', ({(67, 18, 67, 23): 'board', (67, 25, 67, 26): '(3)', (67, 28, 67, 29): '(4)', (67, 31, 67, 32): '(5)'}, {}), '(board, 3, 4, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((68, 7, 68, 33), 'part1.gamma_move', 'gamma_move', ({(68, 18, 68, 23): 'board', (68, 25, 68, 26): '(3)', (68, 28, 68, 29): '(3)', (68, 31, 68, 32): '(0)'}, {}), '(board, 3, 3, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((69, 7, 69, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(69, 25, 69, 30): 'board', (69, 32, 69, 33): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((70, 7, 70, 33), 'part1.gamma_move', 'gamma_move', ({(70, 18, 70, 23): 'board', (70, 25, 70, 26): '(2)', (70, 28, 70, 29): '(1)', (70, 31, 70, 32): '(7)'}, {}), '(board, 2, 1, 7)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((71, 7, 71, 33), 'part1.gamma_move', 'gamma_move', ({(71, 18, 71, 23): 'board', (71, 25, 71, 26): '(2)', (71, 28, 71, 29): '(3)', (71, 31, 71, 32): '(5)'}, {}), '(board, 2, 3, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((72, 7, 72, 33), 'part1.gamma_move', 'gamma_move', ({(72, 18, 72, 23): 'board', (72, 25, 72, 26): '(3)', (72, 28, 72, 29): '(0)', (72, 31, 72, 32): '(5)'}, {}), '(board, 3, 0, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, 
gamma_move, gamma_new\n'), ((73, 7, 73, 33), 'part1.gamma_move', 'gamma_move', ({(73, 18, 73, 23): 'board', (73, 25, 73, 26): '(3)', (73, 28, 73, 29): '(0)', (73, 31, 73, 32): '(1)'}, {}), '(board, 3, 0, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((74, 7, 74, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(74, 29, 74, 34): 'board', (74, 36, 74, 37): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((75, 7, 75, 33), 'part1.gamma_move', 'gamma_move', ({(75, 18, 75, 23): 'board', (75, 25, 75, 26): '(1)', (75, 28, 75, 29): '(3)', (75, 31, 75, 32): '(0)'}, {}), '(board, 1, 3, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((76, 7, 76, 33), 'part1.gamma_move', 'gamma_move', ({(76, 18, 76, 23): 'board', (76, 25, 76, 26): '(1)', (76, 28, 76, 29): '(0)', (76, 31, 76, 32): '(7)'}, {}), '(board, 1, 0, 7)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((91, 7, 91, 33), 'part1.gamma_move', 'gamma_move', ({(91, 18, 91, 23): 'board', (91, 25, 91, 26): '(2)', (91, 28, 91, 29): '(5)', (91, 31, 91, 32): '(1)'}, {}), '(board, 2, 5, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((92, 7, 92, 33), 'part1.gamma_move', 'gamma_move', ({(92, 18, 92, 23): 'board', (92, 25, 92, 26): '(2)', (92, 28, 92, 29): '(5)', (92, 31, 92, 32): '(4)'}, {}), '(board, 2, 5, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((93, 7, 93, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(93, 29, 93, 34): 'board', (93, 36, 93, 37): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((94, 7, 94, 33), 'part1.gamma_move', 'gamma_move', ({(94, 18, 94, 23): 'board', (94, 25, 94, 26): '(3)', (94, 28, 94, 29): '(7)', (94, 31, 94, 32): '(3)'}, {}), '(board, 3, 7, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((95, 7, 95, 33), 'part1.gamma_move', 'gamma_move', ({(95, 18, 95, 23): 'board', (95, 25, 95, 26): '(3)', (95, 28, 95, 29): '(5)', (95, 31, 95, 32): '(1)'}, {}), '(board, 3, 5, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((96, 7, 96, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(96, 25, 96, 30): 'board', (96, 32, 96, 33): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((97, 7, 97, 33), 'part1.gamma_move', 'gamma_move', ({(97, 18, 97, 23): 'board', (97, 25, 97, 26): '(1)', (97, 28, 97, 29): '(5)', (97, 31, 97, 32): '(4)'}, {}), '(board, 1, 5, 4)', False, 'from part1 import 
gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((98, 7, 98, 33), 'part1.gamma_move', 'gamma_move', ({(98, 18, 98, 23): 'board', (98, 25, 98, 26): '(1)', (98, 28, 98, 29): '(0)', (98, 31, 98, 32): '(0)'}, {}), '(board, 1, 0, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((99, 7, 99, 33), 'part1.gamma_move', 'gamma_move', ({(99, 18, 99, 23): 'board', (99, 25, 99, 26): '(2)', (99, 28, 99, 29): '(6)', (99, 31, 99, 32): '(3)'}, {}), '(board, 2, 6, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((100, 7, 100, 33), 'part1.gamma_move', 'gamma_move', ({(100, 18, 100, 23): 'board', (100, 25, 100, 26): '(2)', (100, 28, 100, 29): '(4)', (100, 31, 100, 32): '(4)'}, {}), '(board, 2, 4, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((101, 7, 101, 33), 'part1.gamma_move', 'gamma_move', ({(101, 18, 101, 23): 'board', (101, 25, 101, 26): '(3)', (101, 28, 101, 29): '(0)', (101, 31, 101, 32): '(5)'}, {}), '(board, 3, 0, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((102, 7, 102, 33), 'part1.gamma_move', 'gamma_move', ({(102, 18, 102, 23): 'board', (102, 25, 102, 26): '(3)', (102, 28, 102, 29): '(0)', (102, 31, 102, 32): '(1)'}, {}), '(board, 3, 0, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((103, 7, 103, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(103, 25, 103, 30): 'board', (103, 32, 103, 33): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((104, 7, 104, 33), 'part1.gamma_move', 'gamma_move', ({(104, 18, 104, 23): 'board', (104, 25, 104, 26): '(1)', (104, 28, 104, 29): '(1)', (104, 31, 104, 32): '(7)'}, {}), '(board, 1, 1, 7)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((105, 7, 105, 33), 'part1.gamma_move', 'gamma_move', ({(105, 18, 105, 23): 'board', (105, 25, 105, 26): '(1)', (105, 28, 105, 29): '(2)', (105, 31, 105, 32): '(1)'}, {}), '(board, 1, 2, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((120, 7, 120, 33), 'part1.gamma_move', 'gamma_move', ({(120, 18, 120, 23): 'board', (120, 25, 120, 26): '(2)', (120, 28, 120, 29): '(1)', (120, 31, 120, 32): '(6)'}, {}), '(board, 2, 1, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((121, 7, 121, 33), 'part1.gamma_move', 'gamma_move', ({(121, 18, 121, 23): 'board', (121, 25, 121, 26): '(2)', (121, 28, 121, 29): '(2)', (121, 31, 121, 32): '(1)'}, {}), '(board, 2, 2, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((122, 7, 122, 33), 'part1.gamma_move', 'gamma_move', ({(122, 18, 122, 23): 'board', (122, 25, 122, 26): '(3)', (122, 28, 122, 29): '(1)', (122, 31, 122, 32): '(2)'}, {}), '(board, 3, 1, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((123, 7, 123, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(123, 25, 123, 30): 'board', (123, 32, 123, 33): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((124, 7, 124, 40), 'part1.gamma_golden_move', 'gamma_golden_move', ({(124, 25, 124, 30): 'board', (124, 32, 124, 33): '(3)', (124, 35, 124, 36): '(4)', (124, 38, 124, 39): '(4)'}, {}), '(board, 3, 4, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((125, 7, 125, 33), 'part1.gamma_move', 'gamma_move', ({(125, 18, 125, 23): 'board', (125, 25, 125, 26): '(1)', (125, 28, 125, 29): '(0)', (125, 31, 125, 32): '(2)'}, {}), '(board, 1, 0, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((126, 7, 126, 33), 'part1.gamma_move', 'gamma_move', ({(126, 18, 126, 23): 'board', (126, 25, 126, 26): '(1)', (126, 28, 126, 29): '(3)', (126, 31, 126, 32): '(6)'}, {}), '(board, 1, 3, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((127, 7, 127, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(127, 29, 127, 34): 'board', (127, 36, 127, 37): '(1)'}, {}), '(board, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((128, 7, 128, 33), 'part1.gamma_move', 'gamma_move', ({(128, 18, 128, 23): 'board', (128, 25, 128, 26): '(2)', (128, 28, 128, 29): '(7)', (128, 31, 128, 32): '(4)'}, {}), '(board, 2, 7, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((129, 7, 129, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(129, 25, 129, 30): 'board', (129, 32, 129, 33): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((130, 7, 130, 33), 'part1.gamma_move', 'gamma_move', ({(130, 18, 130, 23): 'board', (130, 25, 130, 26): '(3)', (130, 28, 130, 29): '(5)', (130, 31, 130, 32): '(5)'}, {}), '(board, 3, 5, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((131, 7, 131, 33), 'part1.gamma_move', 'gamma_move', ({(131, 18, 131, 23): 'board', (131, 25, 131, 26): '(3)', (131, 28, 131, 29): '(5)', (131, 31, 131, 32): '(5)'}, {}), '(board, 3, 5, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((132, 7, 132, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(132, 25, 132, 30): 'board', 
(132, 32, 132, 33): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((133, 7, 133, 33), 'part1.gamma_move', 'gamma_move', ({(133, 18, 133, 23): 'board', (133, 25, 133, 26): '(1)', (133, 28, 133, 29): '(0)', (133, 31, 133, 32): '(5)'}, {}), '(board, 1, 0, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((134, 7, 134, 33), 'part1.gamma_move', 'gamma_move', ({(134, 18, 134, 23): 'board', (134, 25, 134, 26): '(1)', (134, 28, 134, 29): '(5)', (134, 31, 134, 32): '(7)'}, {}), '(board, 1, 5, 7)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((135, 7, 135, 33), 'part1.gamma_move', 'gamma_move', ({(135, 18, 135, 23): 'board', (135, 25, 135, 26): '(2)', (135, 28, 135, 29): '(0)', (135, 31, 135, 32): '(6)'}, {}), '(board, 2, 0, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((136, 7, 136, 33), 'part1.gamma_move', 'gamma_move', ({(136, 18, 136, 23): 'board', (136, 25, 136, 26): '(2)', (136, 28, 136, 29): '(5)', (136, 31, 136, 32): '(6)'}, {}), '(board, 2, 5, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((137, 7, 137, 33), 'part1.gamma_move', 'gamma_move', ({(137, 18, 137, 23): 'board', (137, 25, 137, 26): '(3)', (137, 28, 137, 29): '(2)', (137, 31, 137, 32): '(2)'}, {}), '(board, 3, 2, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((138, 7, 138, 33), 'part1.gamma_move', 'gamma_move', ({(138, 18, 138, 23): 'board', (138, 25, 138, 26): '(1)', (138, 28, 138, 29): '(5)', (138, 31, 138, 32): '(2)'}, {}), '(board, 1, 5, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((139, 7, 139, 33), 'part1.gamma_move', 'gamma_move', ({(139, 18, 139, 23): 'board', (139, 25, 139, 26): '(2)', (139, 28, 139, 29): '(7)', (139, 31, 139, 32): '(4)'}, {}), '(board, 2, 7, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((140, 7, 140, 33), 'part1.gamma_move', 'gamma_move', ({(140, 18, 140, 23): 'board', (140, 25, 140, 26): '(3)', (140, 28, 140, 29): '(2)', (140, 31, 140, 32): '(3)'}, {}), '(board, 3, 2, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((141, 7, 141, 33), 'part1.gamma_move', 'gamma_move', ({(141, 18, 141, 23): 'board', (141, 25, 141, 26): '(3)', (141, 28, 141, 29): '(3)', (141, 31, 141, 32): '(1)'}, {}), '(board, 3, 3, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((142, 7, 142, 33), 'part1.gamma_move', 'gamma_move', ({(142, 18, 142, 23): 'board', (142, 25, 142, 26): '(1)', (142, 28, 142, 29): '(5)', (142, 31, 142, 32): '(1)'}, {}), 
'(board, 1, 5, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((143, 7, 143, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(143, 25, 143, 30): 'board', (143, 32, 143, 33): '(1)'}, {}), '(board, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((144, 7, 144, 33), 'part1.gamma_move', 'gamma_move', ({(144, 18, 144, 23): 'board', (144, 25, 144, 26): '(2)', (144, 28, 144, 29): '(4)', (144, 31, 144, 32): '(2)'}, {}), '(board, 2, 4, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((145, 7, 145, 33), 'part1.gamma_move', 'gamma_move', ({(145, 18, 145, 23): 'board', (145, 25, 145, 26): '(3)', (145, 28, 145, 29): '(4)', (145, 31, 145, 32): '(1)'}, {}), '(board, 3, 4, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((146, 7, 146, 33), 'part1.gamma_move', 'gamma_move', ({(146, 18, 146, 23): 'board', (146, 25, 146, 26): '(3)', (146, 28, 146, 29): '(5)', (146, 31, 146, 32): '(2)'}, {}), '(board, 3, 5, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((147, 7, 147, 33), 'part1.gamma_move', 'gamma_move', ({(147, 18, 147, 23): 'board', (147, 25, 147, 26): '(1)', (147, 28, 147, 29): '(7)', (147, 31, 147, 32): '(4)'}, {}), '(board, 1, 7, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((148, 7, 148, 33), 'part1.gamma_move', 'gamma_move', ({(148, 18, 148, 23): 'board', (148, 25, 148, 26): '(1)', (148, 28, 148, 29): '(4)', (148, 31, 148, 32): '(1)'}, {}), '(board, 1, 4, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((149, 7, 149, 33), 'part1.gamma_move', 'gamma_move', ({(149, 18, 149, 23): 'board', (149, 25, 149, 26): '(2)', (149, 28, 149, 29): '(0)', (149, 31, 149, 32): '(2)'}, {}), '(board, 2, 0, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((150, 7, 150, 33), 'part1.gamma_move', 'gamma_move', ({(150, 18, 150, 23): 'board', (150, 25, 150, 26): '(2)', (150, 28, 150, 29): '(0)', (150, 31, 150, 32): '(5)'}, {}), '(board, 2, 0, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((151, 7, 151, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(151, 25, 151, 30): 'board', (151, 32, 151, 33): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((152, 7, 152, 33), 'part1.gamma_move', 'gamma_move', ({(152, 18, 152, 23): 'board', (152, 25, 152, 26): '(3)', (152, 28, 152, 29): '(5)', (152, 31, 152, 32): '(2)'}, {}), '(board, 3, 5, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, 
gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((153, 7, 153, 33), 'part1.gamma_move', 'gamma_move', ({(153, 18, 153, 23): 'board', (153, 25, 153, 26): '(1)', (153, 28, 153, 29): '(1)', (153, 31, 153, 32): '(5)'}, {}), '(board, 1, 1, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((154, 7, 154, 33), 'part1.gamma_move', 'gamma_move', ({(154, 18, 154, 23): 'board', (154, 25, 154, 26): '(2)', (154, 28, 154, 29): '(3)', (154, 31, 154, 32): '(5)'}, {}), '(board, 2, 3, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((155, 7, 155, 33), 'part1.gamma_move', 'gamma_move', ({(155, 18, 155, 23): 'board', (155, 25, 155, 26): '(2)', (155, 28, 155, 29): '(4)', (155, 31, 155, 32): '(1)'}, {}), '(board, 2, 4, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((156, 7, 156, 33), 'part1.gamma_move', 'gamma_move', ({(156, 18, 156, 23): 'board', (156, 25, 156, 26): '(3)', (156, 28, 156, 29): '(0)', (156, 31, 156, 32): '(3)'}, {}), '(board, 3, 0, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((157, 7, 157, 33), 'part1.gamma_move', 'gamma_move', ({(157, 18, 157, 23): 'board', (157, 25, 157, 26): '(3)', (157, 28, 157, 29): '(1)', (157, 31, 157, 32): '(5)'}, {}), '(board, 3, 1, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((158, 7, 158, 33), 'part1.gamma_move', 'gamma_move', ({(158, 18, 158, 23): 'board', (158, 25, 158, 26): '(1)', (158, 28, 158, 29): '(2)', (158, 31, 158, 32): '(4)'}, {}), '(board, 1, 2, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((159, 7, 159, 33), 'part1.gamma_move', 'gamma_move', ({(159, 18, 159, 23): 'board', (159, 25, 159, 26): '(1)', (159, 28, 159, 29): '(3)', (159, 31, 159, 32): '(0)'}, {}), '(board, 1, 3, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((160, 7, 160, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(160, 25, 160, 30): 'board', (160, 32, 160, 33): '(1)'}, {}), '(board, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((161, 7, 161, 33), 'part1.gamma_move', 'gamma_move', ({(161, 18, 161, 23): 'board', (161, 25, 161, 26): '(2)', (161, 28, 161, 29): '(3)', (161, 31, 161, 32): '(5)'}, {}), '(board, 2, 3, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((162, 7, 162, 33), 'part1.gamma_move', 'gamma_move', ({(162, 18, 162, 23): 'board', (162, 25, 162, 26): '(2)', (162, 28, 162, 29): '(3)', (162, 31, 162, 32): '(1)'}, {}), '(board, 2, 3, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((163, 7, 
163, 33), 'part1.gamma_move', 'gamma_move', ({(163, 18, 163, 23): 'board', (163, 25, 163, 26): '(3)', (163, 28, 163, 29): '(5)', (163, 31, 163, 32): '(2)'}, {}), '(board, 3, 5, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((164, 7, 164, 33), 'part1.gamma_move', 'gamma_move', ({(164, 18, 164, 23): 'board', (164, 25, 164, 26): '(1)', (164, 28, 164, 29): '(0)', (164, 31, 164, 32): '(4)'}, {}), '(board, 1, 0, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((165, 7, 165, 33), 'part1.gamma_move', 'gamma_move', ({(165, 18, 165, 23): 'board', (165, 25, 165, 26): '(1)', (165, 28, 165, 29): '(0)', (165, 31, 165, 32): '(6)'}, {}), '(board, 1, 0, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((166, 7, 166, 33), 'part1.gamma_move', 'gamma_move', ({(166, 18, 166, 23): 'board', (166, 25, 166, 26): '(2)', (166, 28, 166, 29): '(5)', (166, 31, 166, 32): '(5)'}, {}), '(board, 2, 5, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((167, 7, 167, 40), 'part1.gamma_golden_move', 'gamma_golden_move', ({(167, 25, 167, 30): 'board', (167, 32, 167, 33): '(2)', (167, 35, 167, 36): '(2)', (167, 38, 167, 39): '(2)'}, {}), '(board, 2, 2, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((168, 7, 168, 33), 'part1.gamma_move', 'gamma_move', ({(168, 18, 168, 23): 'board', (168, 25, 168, 26): '(1)', (168, 28, 168, 29): '(5)', (168, 31, 168, 32): '(5)'}, {}), '(board, 1, 5, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((169, 7, 169, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(169, 25, 169, 30): 'board', (169, 32, 169, 33): '(1)'}, {}), '(board, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((170, 7, 170, 33), 'part1.gamma_move', 'gamma_move', ({(170, 18, 170, 23): 'board', (170, 25, 170, 26): '(2)', (170, 28, 170, 29): '(2)', (170, 31, 170, 32): '(6)'}, {}), '(board, 2, 2, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((171, 7, 171, 33), 'part1.gamma_move', 'gamma_move', ({(171, 18, 171, 23): 'board', (171, 25, 171, 26): '(2)', (171, 28, 171, 29): '(5)', (171, 31, 171, 32): '(6)'}, {}), '(board, 2, 5, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((172, 7, 172, 33), 'part1.gamma_move', 'gamma_move', ({(172, 18, 172, 23): 'board', (172, 25, 172, 26): '(3)', (172, 28, 172, 29): '(4)', (172, 31, 172, 32): '(3)'}, {}), '(board, 3, 4, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((173, 7, 173, 33), 'part1.gamma_move', 'gamma_move', ({(173, 18, 173, 23): 
'board', (173, 25, 173, 26): '(1)', (173, 28, 173, 29): '(4)', (173, 31, 173, 32): '(3)'}, {}), '(board, 1, 4, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((174, 7, 174, 33), 'part1.gamma_move', 'gamma_move', ({(174, 18, 174, 23): 'board', (174, 25, 174, 26): '(1)', (174, 28, 174, 29): '(3)', (174, 31, 174, 32): '(5)'}, {}), '(board, 1, 3, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((175, 7, 175, 33), 'part1.gamma_move', 'gamma_move', ({(175, 18, 175, 23): 'board', (175, 25, 175, 26): '(2)', (175, 28, 175, 29): '(2)', (175, 31, 175, 32): '(0)'}, {}), '(board, 2, 2, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((176, 7, 176, 33), 'part1.gamma_move', 'gamma_move', ({(176, 18, 176, 23): 'board', (176, 25, 176, 26): '(3)', (176, 28, 176, 29): '(0)', (176, 31, 176, 32): '(4)'}, {}), '(board, 3, 0, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((177, 7, 177, 33), 'part1.gamma_move', 'gamma_move', ({(177, 18, 177, 23): 'board', (177, 25, 177, 26): '(1)', (177, 28, 177, 29): '(7)', (177, 31, 177, 32): '(3)'}, {}), '(board, 1, 7, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((178, 7, 178, 33), 'part1.gamma_move', 'gamma_move', ({(178, 18, 178, 23): 'board', (178, 25, 178, 26): '(2)', (178, 28, 178, 29): '(7)', (178, 31, 178, 32): '(3)'}, {}), '(board, 2, 7, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((179, 7, 179, 33), 'part1.gamma_move', 'gamma_move', ({(179, 18, 179, 23): 'board', (179, 25, 179, 26): '(2)', (179, 28, 179, 29): '(3)', (179, 31, 179, 32): '(1)'}, {}), '(board, 2, 3, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((180, 7, 180, 33), 'part1.gamma_move', 'gamma_move', ({(180, 18, 180, 23): 'board', (180, 25, 180, 26): '(3)', (180, 28, 180, 29): '(7)', (180, 31, 180, 32): '(3)'}, {}), '(board, 3, 7, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((181, 7, 181, 33), 'part1.gamma_move', 'gamma_move', ({(181, 18, 181, 23): 'board', (181, 25, 181, 26): '(3)', (181, 28, 181, 29): '(0)', (181, 31, 181, 32): '(2)'}, {}), '(board, 3, 0, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((182, 7, 182, 33), 'part1.gamma_move', 'gamma_move', ({(182, 18, 182, 23): 'board', (182, 25, 182, 26): '(1)', (182, 28, 182, 29): '(3)', (182, 31, 182, 32): '(3)'}, {}), '(board, 1, 3, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((183, 7, 183, 33), 'part1.gamma_move', 'gamma_move', ({(183, 18, 183, 23): 'board', (183, 25, 183, 26): 
'(2)', (183, 28, 183, 29): '(7)', (183, 31, 183, 32): '(2)'}, {}), '(board, 2, 7, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((184, 7, 184, 33), 'part1.gamma_move', 'gamma_move', ({(184, 18, 184, 23): 'board', (184, 25, 184, 26): '(2)', (184, 28, 184, 29): '(2)', (184, 31, 184, 32): '(3)'}, {}), '(board, 2, 2, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((185, 7, 185, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(185, 25, 185, 30): 'board', (185, 32, 185, 33): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((186, 7, 186, 33), 'part1.gamma_move', 'gamma_move', ({(186, 18, 186, 23): 'board', (186, 25, 186, 26): '(3)', (186, 28, 186, 29): '(7)', (186, 31, 186, 32): '(3)'}, {}), '(board, 3, 7, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((187, 7, 187, 33), 'part1.gamma_move', 'gamma_move', ({(187, 18, 187, 23): 'board', (187, 25, 187, 26): '(3)', (187, 28, 187, 29): '(5)', (187, 31, 187, 32): '(1)'}, {}), '(board, 3, 5, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((188, 7, 188, 33), 'part1.gamma_move', 'gamma_move', ({(188, 18, 188, 23): 'board', (188, 25, 188, 26): '(1)', (188, 28, 188, 29): '(7)', (188, 31, 188, 32): '(2)'}, {}), '(board, 1, 7, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((203, 7, 203, 33), 'part1.gamma_move', 'gamma_move', ({(203, 18, 203, 23): 'board', (203, 25, 203, 26): '(2)', (203, 28, 203, 29): '(2)', (203, 31, 203, 32): '(4)'}, {}), '(board, 2, 2, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((204, 7, 204, 33), 'part1.gamma_move', 'gamma_move', ({(204, 18, 204, 23): 'board', (204, 25, 204, 26): '(2)', (204, 28, 204, 29): '(5)', (204, 31, 204, 32): '(4)'}, {}), '(board, 2, 5, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((205, 7, 205, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(205, 25, 205, 30): 'board', (205, 32, 205, 33): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((206, 7, 206, 33), 'part1.gamma_move', 'gamma_move', ({(206, 18, 206, 23): 'board', (206, 25, 206, 26): '(1)', (206, 28, 206, 29): '(7)', (206, 31, 206, 32): '(2)'}, {}), '(board, 1, 7, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((207, 7, 207, 33), 'part1.gamma_move', 'gamma_move', ({(207, 18, 207, 23): 'board', (207, 25, 207, 26): '(2)', (207, 28, 207, 29): '(7)', (207, 31, 207, 32): '(4)'}, {}), '(board, 2, 7, 4)', False, 'from part1 import gamma_board, 
gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((208, 7, 208, 33), 'part1.gamma_move', 'gamma_move', ({(208, 18, 208, 23): 'board', (208, 25, 208, 26): '(3)', (208, 28, 208, 29): '(0)', (208, 31, 208, 32): '(4)'}, {}), '(board, 3, 0, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((209, 7, 209, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(209, 25, 209, 30): 'board', (209, 32, 209, 33): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((210, 7, 210, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(210, 29, 210, 34): 'board', (210, 36, 210, 37): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((211, 7, 211, 33), 'part1.gamma_move', 'gamma_move', ({(211, 18, 211, 23): 'board', (211, 25, 211, 26): '(2)', (211, 28, 211, 29): '(7)', (211, 31, 211, 32): '(2)'}, {}), '(board, 2, 7, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((212, 7, 212, 33), 'part1.gamma_move', 'gamma_move', ({(212, 18, 212, 23): 'board', (212, 25, 212, 26): '(2)', (212, 28, 212, 29): '(1)', (212, 31, 212, 32): '(4)'}, {}), '(board, 2, 1, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((213, 7, 213, 34), 'part1.gamma_free_fields', 'gamma_free_fields', ({(213, 25, 213, 30): 'board', (213, 32, 213, 33): '(2)'}, {}), '(board, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((214, 7, 214, 33), 'part1.gamma_move', 'gamma_move', ({(214, 18, 214, 23): 'board', (214, 25, 214, 26): '(3)', (214, 28, 214, 29): '(0)', (214, 31, 214, 32): '(5)'}, {}), '(board, 3, 0, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((215, 7, 215, 34), 'part1.gamma_busy_fields', 'gamma_busy_fields', ({(215, 25, 215, 30): 'board', (215, 32, 215, 33): '(3)'}, {}), '(board, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((216, 7, 216, 33), 'part1.gamma_move', 'gamma_move', ({(216, 18, 216, 23): 'board', (216, 25, 216, 26): '(1)', (216, 28, 216, 29): '(7)', (216, 31, 216, 32): '(2)'}, {}), '(board, 1, 7, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((217, 7, 217, 33), 'part1.gamma_move', 'gamma_move', ({(217, 18, 217, 23): 'board', (217, 25, 217, 26): '(1)', (217, 28, 217, 29): '(1)', (217, 31, 217, 32): '(6)'}, {}), '(board, 1, 1, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((218, 7, 218, 33), 'part1.gamma_move', 'gamma_move', ({(218, 18, 218, 23): 'board', 
(218, 25, 218, 26): '(2)', (218, 28, 218, 29): '(2)', (218, 31, 218, 32): '(0)'}, {}), '(board, 2, 2, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((219, 7, 219, 33), 'part1.gamma_move', 'gamma_move', ({(219, 18, 219, 23): 'board', (219, 25, 219, 26): '(2)', (219, 28, 219, 29): '(1)', (219, 31, 219, 32): '(7)'}, {}), '(board, 2, 1, 7)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((220, 7, 220, 33), 'part1.gamma_move', 'gamma_move', ({(220, 18, 220, 23): 'board', (220, 25, 220, 26): '(3)', (220, 28, 220, 29): '(3)', (220, 31, 220, 32): '(1)'}, {}), '(board, 3, 3, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((221, 7, 221, 33), 'part1.gamma_move', 'gamma_move', ({(221, 18, 221, 23): 'board', (221, 25, 221, 26): '(1)', (221, 28, 221, 29): '(6)', (221, 31, 221, 32): '(4)'}, {}), '(board, 1, 6, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((222, 7, 222, 33), 'part1.gamma_move', 'gamma_move', ({(222, 18, 222, 23): 'board', (222, 25, 222, 26): '(2)', (222, 28, 222, 29): '(0)', (222, 31, 222, 32): '(4)'}, {}), '(board, 2, 0, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((223, 7, 223, 33), 'part1.gamma_move', 'gamma_move', ({(223, 18, 223, 23): 'board', (223, 25, 223, 26): '(2)', (223, 28, 223, 29): '(2)', (223, 31, 223, 32): '(7)'}, {}), '(board, 2, 2, 7)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((238, 7, 238, 33), 'part1.gamma_move', 'gamma_move', ({(238, 18, 238, 23): 'board', (238, 25, 238, 26): '(1)', (238, 28, 238, 29): '(4)', (238, 31, 238, 32): '(1)'}, {}), '(board, 1, 4, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((239, 7, 239, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(239, 29, 239, 34): 'board', (239, 36, 239, 37): '(1)'}, {}), '(board, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((254, 7, 254, 33), 'part1.gamma_move', 'gamma_move', ({(254, 18, 254, 23): 'board', (254, 25, 254, 26): '(2)', (254, 28, 254, 29): '(2)', (254, 31, 254, 32): '(3)'}, {}), '(board, 2, 2, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((255, 7, 255, 33), 'part1.gamma_move', 'gamma_move', ({(255, 18, 255, 23): 'board', (255, 25, 255, 26): '(2)', (255, 28, 255, 29): '(2)', (255, 31, 255, 32): '(4)'}, {}), '(board, 2, 2, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((256, 7, 256, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(256, 29, 256, 34): 'board', (256, 36, 256, 37): '(2)'}, {}), '(board, 2)', 
False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((257, 7, 257, 33), 'part1.gamma_move', 'gamma_move', ({(257, 18, 257, 23): 'board', (257, 25, 257, 26): '(3)', (257, 28, 257, 29): '(2)', (257, 31, 257, 32): '(3)'}, {}), '(board, 3, 2, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((258, 7, 258, 33), 'part1.gamma_move', 'gamma_move', ({(258, 18, 258, 23): 'board', (258, 25, 258, 26): '(1)', (258, 28, 258, 29): '(7)', (258, 31, 258, 32): '(3)'}, {}), '(board, 1, 7, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((259, 7, 259, 33), 'part1.gamma_move', 'gamma_move', ({(259, 18, 259, 23): 'board', (259, 25, 259, 26): '(1)', (259, 28, 259, 29): '(4)', (259, 31, 259, 32): '(3)'}, {}), '(board, 1, 4, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((260, 7, 260, 33), 'part1.gamma_move', 'gamma_move', ({(260, 18, 260, 23): 'board', (260, 25, 260, 26): '(2)', (260, 28, 260, 29): '(2)', (260, 31, 260, 32): '(4)'}, {}), '(board, 2, 2, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((261, 7, 261, 33), 'part1.gamma_move', 'gamma_move', ({(261, 18, 261, 23): 'board', (261, 25, 261, 26): '(1)', (261, 28, 261, 29): '(0)', (261, 31, 261, 32): '(4)'}, {}), '(board, 1, 0, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((262, 7, 262, 33), 'part1.gamma_move', 'gamma_move', ({(262, 18, 262, 23): 'board', (262, 25, 262, 26): '(2)', (262, 28, 262, 29): '(0)', (262, 31, 262, 32): '(4)'}, {}), '(board, 2, 0, 4)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((263, 7, 263, 33), 'part1.gamma_move', 'gamma_move', ({(263, 18, 263, 23): 'board', (263, 25, 263, 26): '(2)', (263, 28, 263, 29): '(2)', (263, 31, 263, 32): '(6)'}, {}), '(board, 2, 2, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((264, 7, 264, 33), 'part1.gamma_move', 'gamma_move', ({(264, 18, 264, 23): 'board', (264, 25, 264, 26): '(3)', (264, 28, 264, 29): '(5)', (264, 31, 264, 32): '(2)'}, {}), '(board, 3, 5, 2)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((265, 7, 265, 33), 'part1.gamma_move', 'gamma_move', ({(265, 18, 265, 23): 'board', (265, 25, 265, 26): '(1)', (265, 28, 265, 29): '(0)', (265, 31, 265, 32): '(5)'}, {}), '(board, 1, 0, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((266, 7, 266, 33), 'part1.gamma_move', 'gamma_move', ({(266, 18, 266, 23): 'board', (266, 25, 266, 26): '(2)', (266, 28, 266, 29): '(3)', (266, 31, 266, 32): '(2)'}, {}), '(board, 2, 3, 2)', False, 'from part1 import 
gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((267, 7, 267, 33), 'part1.gamma_move', 'gamma_move', ({(267, 18, 267, 23): 'board', (267, 25, 267, 26): '(3)', (267, 28, 267, 29): '(0)', (267, 31, 267, 32): '(5)'}, {}), '(board, 3, 0, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((268, 7, 268, 33), 'part1.gamma_move', 'gamma_move', ({(268, 18, 268, 23): 'board', (268, 25, 268, 26): '(1)', (268, 28, 268, 29): '(0)', (268, 31, 268, 32): '(5)'}, {}), '(board, 1, 0, 5)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((269, 7, 269, 33), 'part1.gamma_move', 'gamma_move', ({(269, 18, 269, 23): 'board', (269, 25, 269, 26): '(1)', (269, 28, 269, 29): '(2)', (269, 31, 269, 32): '(3)'}, {}), '(board, 1, 2, 3)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((270, 7, 270, 38), 'part1.gamma_golden_possible', 'gamma_golden_possible', ({(270, 29, 270, 34): 'board', (270, 36, 270, 37): '(1)'}, {}), '(board, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((271, 7, 271, 33), 'part1.gamma_move', 'gamma_move', ({(271, 18, 271, 23): 'board', (271, 25, 271, 26): '(2)', (271, 28, 271, 29): '(2)', (271, 31, 271, 32): '(0)'}, {}), '(board, 2, 2, 0)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((272, 7, 272, 33), 'part1.gamma_move', 'gamma_move', ({(272, 18, 272, 23): 'board', (272, 25, 272, 26): '(3)', (272, 28, 272, 29): '(5)', (272, 31, 272, 32): '(6)'}, {}), '(board, 3, 5, 6)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((273, 7, 273, 33), 'part1.gamma_move', 'gamma_move', ({(273, 18, 273, 23): 'board', (273, 25, 273, 26): '(3)', (273, 28, 273, 29): '(2)', (273, 31, 273, 32): '(1)'}, {}), '(board, 3, 2, 1)', False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n')] |
ZhuoyuWei/transformers | examples/run_chemistry_parser.py | 16d0ebd55d17dd5095231566a0544ecebd56bc9c | # coding=utf-8
# Copyright 2019 The HuggingFace Inc. team.
# Copyright (c) 2019 The HuggingFace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning seq2seq models for sequence generation."""
import argparse
import functools
import logging
import os
import random
import sys
sys.path.append(r'../')
import numpy as np
from tqdm import tqdm, trange
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import (
AutoTokenizer,
BertForMaskedLM,
BertConfig,
PreTrainedEncoderDecoder,
Model2Models,
)
from utils_summarization import (
CNNDailyMailDataset,
encode_for_summarization,
fit_to_block_size,
build_lm_labels,
build_mask,
compute_token_type_ids,
)
from utils_chemistry import (ChemistryDataset,)
'''
class InputExample(object):
def __init__(self,example_id,question_input,question_varible_output=None,condition_output=None):
self.example_id=example_id
self.question_input=question_input
self.question_varible_output=question_varible_output
self.condition_output=condition_output
'''
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# ------------
# Load dataset
# ------------
def load_and_cache_examples(args, tokenizer, prefix="train"):
dataset = ChemistryDataset(tokenizer, prefix=prefix, data_dir=args.data_dir)
return dataset
def collate(data, tokenizer, input_block_size, output_block_size):
    """Collate a list of examples into padded input/target tensors, attention masks and LM labels."""
question_inputs=[]
question_varible_outputs=[]
condition_outputs=[]
for i,example in enumerate(data):
question_input=tokenizer.encode(example.question_input)
question_input=fit_to_block_size(question_input, input_block_size, tokenizer.pad_token_id)
question_inputs.append(question_input)
if example.question_varible_output is not None:
question_varible_output=tokenizer.encode(example.question_varible_output)
else:
question_varible_output=tokenizer.build_inputs_with_special_tokens([])
question_varible_output=fit_to_block_size(question_varible_output, output_block_size, tokenizer.pad_token_id)
question_varible_outputs.append(question_varible_output)
if example.condition_output is not None:
condition_output=tokenizer.encode(example.condition_output)
else:
condition_output=tokenizer.build_inputs_with_special_tokens([])
condition_output=fit_to_block_size(condition_output, output_block_size, tokenizer.pad_token_id)
condition_outputs.append(condition_output)
question_inputs = torch.tensor(question_inputs)
question_varible_outputs = torch.tensor(question_varible_outputs)
condition_outputs = torch.tensor(condition_outputs)
question_inputs_mask = build_mask(question_inputs, tokenizer.pad_token_id)
question_varible_outputs_mask = build_mask(question_varible_outputs, tokenizer.pad_token_id)
condition_outputs_mask = build_mask(condition_outputs, tokenizer.pad_token_id)
question_varible_outputs_mask_lm_labels = build_lm_labels(question_varible_outputs, tokenizer.pad_token_id)
condition_outputs_mask_lm_labels = build_lm_labels(condition_outputs, tokenizer.pad_token_id)
return (
question_inputs,
[question_varible_outputs,condition_outputs],
question_inputs_mask,
[question_varible_outputs_mask,condition_outputs_mask],
[question_varible_outputs_mask_lm_labels,condition_outputs_mask_lm_labels],
)
# ----------
# Optimizers
# ----------
class BertSumOptimizer(object):
""" Specific optimizer for BertSum.
As described in [1], the authors fine-tune BertSum for abstractive
summarization using two Adam Optimizers with different warm-up steps and
learning rate. They also use a custom learning rate scheduler.
[1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
arXiv preprint arXiv:1908.08345 (2019).
"""
def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):
self.encoder = model.encoder
self.decoders = model.decoders
self.lr = lr
self.warmup_steps = warmup_steps
self.decoders_parameters=[]
for decoder in model.decoders:
self.decoders_parameters+=decoder.parameters()
self.optimizers = {
"encoder": Adam(
model.encoder.parameters(),
lr=lr["encoder"],
betas=(beta_1, beta_2),
eps=eps,
),
"decoder": Adam(
self.decoders_parameters,
lr=lr["decoder"],
betas=(beta_1, beta_2),
eps=eps,
),
}
self._step = 0
def _update_rate(self, stack):
return self.lr[stack] * min(
self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-0.5)
)
def zero_grad(self):
        self.optimizers["encoder"].zero_grad()
        self.optimizers["decoder"].zero_grad()
def step(self):
self._step += 1
for stack, optimizer in self.optimizers.items():
new_rate = self._update_rate(stack)
for param_group in optimizer.param_groups:
param_group["lr"] = new_rate
optimizer.step()
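# Illustrative, standalone sketch of the per-stack schedule implemented by
# BertSumOptimizer._update_rate (it is not called anywhere in this script):
# the effective rate is the base rate scaled by
# min(step ** -0.5, step * warmup_steps ** -0.5), i.e. a linear ramp that
# hands over to an inverse-square-root decay. Valid for step >= 1, matching
# the optimizer, which increments _step before computing the rate.
def _example_update_rate(base_lr, step, warmup_steps):
    # Mirrors the expression used in BertSumOptimizer._update_rate above.
    return base_lr * min(step ** (-0.5), step * warmup_steps ** (-0.5))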
# ------------
# Train
# ------------
def train(args, model, tokenizer):
""" Fine-tune the pretrained model on the corpus. """
set_seed(args)
# Load the data
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_dataset = load_and_cache_examples(args, tokenizer, "train")
train_sampler = RandomSampler(train_dataset)
model_collate_fn = functools.partial(collate, tokenizer=tokenizer,
input_block_size=args.input_block_size,output_block_size=args.output_block_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=model_collate_fn,
)
# Training schedule
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = t_total // (
len(train_dataloader) // args.gradient_accumulation_steps + 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
# Prepare the optimizer
#lr = {"encoder": 0.002, "decoder": 0.2}
lr = {"encoder": args.encoder_lr, "decoder": args.decoder_lr}
#warmup_steps = {"encoder": 20000, "decoder": 10000}
warmup_steps = {"encoder": args.encoder_warmup, "decoder": args.decoder_warmup}
optimizer = BertSumOptimizer(model, lr, warmup_steps)
# Train
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps
# * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
model.zero_grad()
train_iterator = trange(args.num_train_epochs, desc="Epoch", disable=False)
global_step = 0
tr_loss = 0.0
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=False)
for step, batch in enumerate(epoch_iterator):
source, target, encoder_mask, decoder_mask, lm_labels = batch
#print('source: {}'.format(source))
#print('target: {}'.format(target))
feed_source=None
feed_targets=[None]*len(target)
feed_encoder_mask=None
feed_decoder_masks=[None]*len(decoder_mask)
feed_lm_labels=[None]*len(lm_labels)
feed_source = source.to(args.device)
for i in range(len(target)):
feed_targets[i] = target[i].to(args.device)
feed_encoder_mask = encoder_mask.to(args.device)
for i in range(len(decoder_mask)):
feed_decoder_masks[i] = decoder_mask[i].to(args.device)
for i in range(len(lm_labels)):
feed_lm_labels[i] = lm_labels[i].to(args.device)
model.train()
#print('debug by zhuoyu: source = {}'.format(source))
#print('debug by zhuoyu: target = {}'.format(target))
#print('debug by zhuoyu, device:')
#print('feed source {}'.format(feed_source.device))
#print('feed target {}'.format([str(feed_target.device) for feed_target in feed_targets]))
#print('feed encoder mask {}'.format(feed_encoder_mask.device))
#print('feed decoder masks {}'.format([str(feed_decoder_mask.device) for feed_decoder_mask in feed_decoder_masks]))
#print('feed lm labels {}'.format([str(feed_lm_label.device) for feed_lm_label in feed_lm_labels]))
outputs = model(
feed_source,
feed_targets,
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks,
decoder_lm_labels=feed_lm_labels,
)
loss=0
for i in range(len(model.decoders)):
#print('outputs[{}][0] type: {}'.format(i,type(outputs[i][0])))
loss += outputs[i][0]
#print(loss)
if args.gradient_accumulation_steps > 1:
loss /= args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
model.zero_grad()
global_step += 1
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
return global_step, tr_loss / global_step
# ------------
# Train
# ------------
def evaluate(args, model, tokenizer, prefix=""):
set_seed(args)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_dataset = load_and_cache_examples(args, tokenizer, prefix="dev")
#for example in eval_dataset.examples:
# print(example.example_id)
# print(example.question_input)
# print(example.question_varible_output)
# print(example.condition_output)
#exit(-1)
eval_sampler = SequentialSampler(eval_dataset)
model_collate_fn = functools.partial(collate, tokenizer=tokenizer,
input_block_size=args.input_block_size,output_block_size=args.output_block_size)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,collate_fn=model_collate_fn,
)
# multi-gpu evaluate
#if args.n_gpu > 1:
# model = torch.nn.DataParallel(model)
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
fout=open(os.path.join(args.output_dir,"dev.res"),'w',encoding='utf-8')
fdebug=open(os.path.join(args.output_dir,"dev.debug.res"),'w',encoding='utf-8')
for batch in tqdm(eval_dataloader, desc="Evaluating"):
source, target, encoder_mask, decoder_mask, lm_labels = batch
#print('[SOURCE]: {}'.format(source))
#print('[TARGET]: {}'.format(target))
#source = source.to(args.device)
#target = target.to(args.device)
#encoder_mask = encoder_mask.to(args.device)
#decoder_mask = decoder_mask.to(args.device)
#lm_labels = lm_labels.to(args.device)
feed_source = None
feed_targets = [None] * len(target)
feed_encoder_mask = None
feed_decoder_masks = [None] * len(decoder_mask)
feed_lm_labels = [None] * len(lm_labels)
feed_source = source.to(args.device)
for i in range(len(target)):
feed_targets[i] = target[i].to(args.device)
feed_encoder_mask = encoder_mask.to(args.device)
for i in range(len(decoder_mask)):
feed_decoder_masks[i] = decoder_mask[i].to(args.device)
for i in range(len(lm_labels)):
feed_lm_labels[i] = lm_labels[i].to(args.device)
with torch.no_grad():
if args.decoding_type=='decoding':
tokens_roles=[]
for i in range(len(feed_targets)):
outputs_ids=model.decoding(
feed_source,
feed_targets[i],
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks[i],
decoder_lm_labels=feed_lm_labels[i],
decoder=model.decoders[i]
#fdebug=fdebug,
)
print('outputs size: {}'.format(outputs_ids.size()))
outputs_ids =outputs_ids.cpu().numpy()
batch_tokens=[]
for idx in outputs_ids:
tokens = []
for id in idx:
#print('{}\t{}'.format(id,type(id)))
tokens.append(tokenizer.ids_to_tokens.get(int(id), tokenizer.unk_token))
batch_tokens.append(tokens)
tokens_roles.append(batch_tokens)
def subtoken2token(subtokens):
token=""
tokens=[]
for subtoken in subtokens:
if subtoken.startswith("##"):
token+=subtoken[2:]
else:
if token!="":
tokens.append(token)
token=subtoken
if token!="":
tokens.append(token)
return tokens
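                # For example (illustrative): subtoken2token(["chem", "##istry", "lab"])
                # returns ["chemistry", "lab"], merging WordPiece continuation pieces
                # back into whole tokens.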
for i in range(len(tokens_roles[0])):
fout.write('\t'.join([' '.join(subtoken2token(tokens_roles[0][i]))
,' '.join(subtoken2token(tokens_roles[1][i]))]) + '\n')
else:
print('debug eva input:')
print('feed_source={}'.format(feed_source))
print('feed_targets={}'.format(feed_targets))
print('feed_encoder_mask={}'.format(feed_encoder_mask))
print('feed_decoder_masks={}'.format(feed_decoder_masks))
print('feed_lm_labels={}'.format(feed_lm_labels))
outputs = model(
feed_source,
feed_targets,
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks,
decoder_lm_labels=feed_lm_labels,
#fdebug=fdebug,
)
ans_seqs=[[],[]]
for i in range(len(model.decoders)):
print(outputs[i][1].size())
predicted_scores=outputs[i][1].argmax(-1).cpu().numpy().tolist()
for idx in predicted_scores:
tokens = []
for id in idx:
tokens.append(tokenizer.ids_to_tokens.get(id, tokenizer.unk_token))
ans_seqs[i].append(tokens)
for i in range(len(ans_seqs[0])):
fout.write('\t'.join([' '.join(ans_seqs[0][i]),' '.join(ans_seqs[1][i])]) + '\n')
# print('debug by zhuoyu, predicted_scores size={}'.format(predicted_scores.size()))
#eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {"perplexity": perplexity}
# Save the evaluation's results
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
#with open(os.path.join(args.output_dir,"dev.res"),'w',encoding='utf-8') as fout:
fout.flush()
fout.close()
fdebug.flush()
fdebug.close()
return result
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input training data file (a text file).",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Optional parameters
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--do_evaluate",
type=bool,
default=False,
help="Run model evaluation on out-of-sample data.",
)
parser.add_argument("--do_train", type=bool, default=False, help="Run training.")
parser.add_argument(
"--do_overwrite_output_dir",
type=bool,
default=False,
help="Whether to overwrite the output dir.",
)
parser.add_argument(
"--encoder_model_name_or_path",
default="bert-base-cased",
type=str,
help="The model checkpoint to initialize the encoder's weights with.",
)
parser.add_argument(
"--decoder_model_name_or_path",
default="/data/zhuoyu/semantic_parsing/models",
type=str,
help="The model checkpoint to initialize the decoder's weights with.",
)
parser.add_argument(
"--model_type",
default="bert",
type=str,
help="The decoder architecture to be fine-tuned.",
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--to_cpu", default=False, type=bool, help="Whether to force training on CPU."
)
parser.add_argument(
"--num_train_epochs",
default=10,
type=int,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for eval.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--input_block_size",
default=256,
type=int,
help="Max seq length for input",
)
parser.add_argument(
"--output_block_size",
default=64,
type=int,
help="Max seq length for output",
)
parser.add_argument(
"--trained_checkpoints",
default="",
type=str,
help="trained_checkpoints",
)
parser.add_argument(
"--decoding_type",
default="pnt",
type=str,
help="",
)
parser.add_argument(
"--encoder_lr",
default=5e-4,
type=float,
help="encoder's learning rate",
)
parser.add_argument(
"--decoder_lr",
default=5e-4,
type=float,
help="encoder's learning rate",
)
parser.add_argument(
"--encoder_warmup",
default=10,
type=int,
help="encoder's learning rate",
)
parser.add_argument(
"--decoder_warmup",
default=100,
type=int,
help="encoder's learning rate",
)
parser.add_argument("--seed", default=42, type=int)
parser.add_argument(
"--decoder_version",
default="v1",
type=str,
help="",
)
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.do_overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --do_overwrite_output_dir to overwrite.".format(
args.output_dir
)
)
# Set up training device
if args.to_cpu or not torch.cuda.is_available():
args.device = torch.device("cpu")
args.n_gpu = 0
else:
args.device = torch.device("cuda")
args.n_gpu = torch.cuda.device_count()
print(args.n_gpu)
# Load pretrained model and tokenizer. The decoder's weights are randomly initialized.
tokenizer = AutoTokenizer.from_pretrained(args.encoder_model_name_or_path
,never_split=['[unused0]','[unused1]','[unused2]','[unused3]'])
#config = BertConfig.from_pretrained(args.model_name_or_path)
#config.num_hidden_layers=3
#config.is_decoder=True
#decoder_model = BertForMaskedLM(config)
decoder_models=[BertForMaskedLM.from_pretrained(args.decoder_model_name_or_path),
BertForMaskedLM.from_pretrained(args.decoder_model_name_or_path)]
model = Model2Models.from_pretrained(
args.encoder_model_name_or_path, decoder_model=decoder_models
)
#model = Model2Model.from_pretrained(
# args.model_name_or_path, decoder_model=None
#)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
0,
args.device,
args.n_gpu,
False,
False,
)
logger.info("Training/evaluation parameters %s", args)
# Train the model
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if args.do_train:
model.to(args.device)
global_step, tr_loss = train(args, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, "training_arguments.bin"))
# Evaluate the model
results = {}
if args.do_evaluate:
checkpoints = [args.trained_checkpoints]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
encoder_checkpoint = os.path.join(checkpoint, "encoder")
decoder_checkpoint_question_varibles = os.path.join(checkpoint, "decoder_0")
decoder_checkpoint_conditions = os.path.join(checkpoint, "decoder_1")
decoder_models = [BertForMaskedLM.from_pretrained(decoder_checkpoint_question_varibles),
BertForMaskedLM.from_pretrained(decoder_checkpoint_conditions)]
model = Model2Models.from_pretrained(
encoder_checkpoint, decoder_model=decoder_models
)
model.to(args.device)
#model = PreTrainedEncoderDecoder.from_pretrained(
# encoder_checkpoint, decoder_checkpoint
#)
#model = Model2Model.from_pretrained(encoder_checkpoint)
#model.to(args.device)
results = "placeholder"
evaluate(args,model,tokenizer,"test")
return results
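# Example invocation (illustrative; the paths are placeholders). Note that the
# boolean flags use argparse's `type=bool`, so any non-empty string (e.g. "True")
# enables them:
#   python run_chemistry_parser.py \
#       --data_dir ./chemistry_data \
#       --output_dir ./output \
#       --do_train True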
if __name__ == "__main__":
main()
| [((24, 0, 24, 23), 'sys.path.append', 'sys.path.append', ({(24, 16, 24, 22): '"""../"""'}, {}), "('../')", False, 'import sys\n'), ((61, 9, 61, 36), 'logging.getLogger', 'logging.getLogger', ({(61, 27, 61, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((62, 0, 62, 58), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((66, 4, 66, 26), 'random.seed', 'random.seed', ({(66, 16, 66, 25): 'args.seed'}, {}), '(args.seed)', False, 'import random\n'), ((67, 4, 67, 29), 'numpy.random.seed', 'np.random.seed', ({(67, 19, 67, 28): 'args.seed'}, {}), '(args.seed)', True, 'import numpy as np\n'), ((68, 4, 68, 32), 'torch.manual_seed', 'torch.manual_seed', ({(68, 22, 68, 31): 'args.seed'}, {}), '(args.seed)', False, 'import torch\n'), ((77, 14, 77, 80), 'utils_chemistry.ChemistryDataset', 'ChemistryDataset', (), '', False, 'from utils_chemistry import ChemistryDataset\n'), ((105, 22, 105, 51), 'torch.tensor', 'torch.tensor', ({(105, 35, 105, 50): 'question_inputs'}, {}), '(question_inputs)', False, 'import torch\n'), ((106, 31, 106, 69), 'torch.tensor', 'torch.tensor', ({(106, 44, 106, 68): 'question_varible_outputs'}, {}), '(question_varible_outputs)', False, 'import torch\n'), ((107, 24, 107, 55), 'torch.tensor', 'torch.tensor', ({(107, 37, 107, 54): 'condition_outputs'}, {}), '(condition_outputs)', False, 'import torch\n'), ((109, 27, 109, 78), 'utils_summarization.build_mask', 'build_mask', ({(109, 38, 109, 53): 'question_inputs', (109, 55, 109, 77): 'tokenizer.pad_token_id'}, {}), '(question_inputs, tokenizer.pad_token_id)', False, 'from utils_summarization import CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids\n'), ((110, 36, 110, 96), 'utils_summarization.build_mask', 'build_mask', ({(110, 47, 110, 71): 'question_varible_outputs', (110, 73, 110, 95): 'tokenizer.pad_token_id'}, {}), '(question_varible_outputs, tokenizer.pad_token_id)', False, 'from utils_summarization import CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids\n'), ((111, 29, 111, 82), 'utils_summarization.build_mask', 'build_mask', ({(111, 40, 111, 57): 'condition_outputs', (111, 59, 111, 81): 'tokenizer.pad_token_id'}, {}), '(condition_outputs, tokenizer.pad_token_id)', False, 'from utils_summarization import CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids\n'), ((113, 46, 113, 111), 'utils_summarization.build_lm_labels', 'build_lm_labels', ({(113, 62, 113, 86): 'question_varible_outputs', (113, 88, 113, 110): 'tokenizer.pad_token_id'}, {}), '(question_varible_outputs, tokenizer.pad_token_id)', False, 'from utils_summarization import CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids\n'), ((114, 39, 114, 97), 'utils_summarization.build_lm_labels', 'build_lm_labels', ({(114, 55, 114, 72): 'condition_outputs', (114, 74, 114, 96): 'tokenizer.pad_token_id'}, {}), '(condition_outputs, tokenizer.pad_token_id)', False, 'from utils_summarization import CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids\n'), ((197, 20, 197, 48), 'torch.utils.data.RandomSampler', 'RandomSampler', ({(197, 34, 197, 47): 'train_dataset'}, {}), '(train_dataset)', False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((198, 23, 199, 121), 
'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((200, 23, 205, 5), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((243, 21, 243, 79), 'tqdm.trange', 'trange', (), '', False, 'from tqdm import tqdm, trange\n'), ((332, 19, 332, 50), 'torch.utils.data.SequentialSampler', 'SequentialSampler', ({(332, 37, 332, 49): 'eval_dataset'}, {}), '(eval_dataset)', False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((333, 23, 334, 121), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n'), ((335, 22, 337, 5), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((351, 17, 351, 57), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm, trange\n'), ((472, 23, 472, 72), 'os.path.join', 'os.path.join', ({(472, 36, 472, 51): 'args.output_dir', (472, 53, 472, 71): '"""eval_results.txt"""'}, {}), "(args.output_dir, 'eval_results.txt')", False, 'import os\n'), ((491, 13, 491, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((667, 16, 668, 109), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (), '', False, 'from transformers import AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Models\n'), ((675, 12, 677, 5), 'transformers.Model2Models.from_pretrained', 'Model2Models.from_pretrained', (), '', False, 'from transformers import AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Models\n'), ((683, 4, 687, 5), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((88, 23, 88, 98), 'utils_summarization.fit_to_block_size', 'fit_to_block_size', ({(88, 41, 88, 55): 'question_input', (88, 57, 88, 73): 'input_block_size', (88, 75, 88, 97): 'tokenizer.pad_token_id'}, {}), '(question_input, input_block_size, tokenizer.pad_token_id)', False, 'from utils_summarization import CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids\n'), ((95, 32, 95, 117), 'utils_summarization.fit_to_block_size', 'fit_to_block_size', ({(95, 50, 95, 73): 'question_varible_output', (95, 75, 95, 92): 'output_block_size', (95, 94, 95, 116): 'tokenizer.pad_token_id'}, {}), '(question_varible_output, output_block_size, tokenizer.\n pad_token_id)', False, 'from utils_summarization import CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids\n'), ((102, 25, 102, 103), 'utils_summarization.fit_to_block_size', 'fit_to_block_size', ({(102, 43, 102, 59): 'condition_output', (102, 61, 102, 78): 'output_block_size', (102, 80, 102, 102): 'tokenizer.pad_token_id'}, {}), '(condition_output, output_block_size, tokenizer.pad_token_id)', False, 'from utils_summarization import CNNDailyMailDataset, encode_for_summarization, fit_to_block_size, build_lm_labels, build_mask, compute_token_type_ids\n'), ((248, 25, 248, 80), 'tqdm.tqdm', 'tqdm', (), '', False, 'from tqdm import tqdm, trange\n'), ((349, 14, 349, 53), 'os.path.join', 'os.path.join', ({(349, 27, 349, 42): 'args.output_dir', (349, 43, 349, 52): '"""dev.res"""'}, {}), "(args.output_dir, 'dev.res')", False, 'import os\n'), ((350, 16, 350, 61), 'os.path.join', 'os.path.join', ({(350, 29, 350, 44): 'args.output_dir', (350, 45, 350, 60): 
'"""dev.debug.res"""'}, {}), "(args.output_dir, 'dev.debug.res')", False, 'import os\n'), ((467, 27, 467, 50), 'torch.tensor', 'torch.tensor', ({(467, 40, 467, 49): 'eval_loss'}, {}), '(eval_loss)', False, 'import torch\n'), ((473, 11, 473, 42), 'os.path.exists', 'os.path.exists', ({(473, 26, 473, 41): 'args.output_dir'}, {}), '(args.output_dir)', False, 'import os\n'), ((474, 8, 474, 36), 'os.makedirs', 'os.makedirs', ({(474, 20, 474, 35): 'args.output_dir'}, {}), '(args.output_dir)', False, 'import os\n'), ((644, 24, 644, 55), 'os.path.exists', 'os.path.exists', ({(644, 39, 644, 54): 'args.output_dir'}, {}), '(args.output_dir)', False, 'import os\n'), ((645, 24, 645, 51), 'os.listdir', 'os.listdir', ({(645, 35, 645, 50): 'args.output_dir'}, {}), '(args.output_dir)', False, 'import os\n'), ((659, 22, 659, 41), 'torch.device', 'torch.device', ({(659, 35, 659, 40): '"""cpu"""'}, {}), "('cpu')", False, 'import torch\n'), ((662, 22, 662, 42), 'torch.device', 'torch.device', ({(662, 35, 662, 41): '"""cuda"""'}, {}), "('cuda')", False, 'import torch\n'), ((663, 21, 663, 46), 'torch.cuda.device_count', 'torch.cuda.device_count', ({}, {}), '()', False, 'import torch\n'), ((673, 20, 673, 84), 'transformers.BertForMaskedLM.from_pretrained', 'BertForMaskedLM.from_pretrained', ({(673, 52, 673, 83): 'args.decoder_model_name_or_path'}, {}), '(args.decoder_model_name_or_path)', False, 'from transformers import AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Models\n'), ((674, 20, 674, 84), 'transformers.BertForMaskedLM.from_pretrained', 'BertForMaskedLM.from_pretrained', ({(674, 52, 674, 83): 'args.decoder_model_name_or_path'}, {}), '(args.decoder_model_name_or_path)', False, 'from transformers import AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Models\n'), ((701, 11, 701, 42), 'os.path.exists', 'os.path.exists', ({(701, 26, 701, 41): 'args.output_dir'}, {}), '(args.output_dir)', False, 'import os\n'), ((702, 12, 702, 40), 'os.makedirs', 'os.makedirs', ({(702, 24, 702, 39): 'args.output_dir'}, {}), '(args.output_dir)', False, 'import os\n'), ((157, 23, 162, 13), 'torch.optim.Adam', 'Adam', (), '', False, 'from torch.optim import Adam\n'), ((379, 13, 379, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((658, 26, 658, 51), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((719, 25, 719, 80), 'os.path.join', 'os.path.join', ({(719, 38, 719, 53): 'args.output_dir', (719, 55, 719, 79): '"""training_arguments.bin"""'}, {}), "(args.output_dir, 'training_arguments.bin')", False, 'import os\n'), ((727, 33, 727, 68), 'os.path.join', 'os.path.join', ({(727, 46, 727, 56): 'checkpoint', (727, 58, 727, 67): '"""encoder"""'}, {}), "(checkpoint, 'encoder')", False, 'import os\n'), ((728, 51, 728, 88), 'os.path.join', 'os.path.join', ({(728, 64, 728, 74): 'checkpoint', (728, 76, 728, 87): '"""decoder_0"""'}, {}), "(checkpoint, 'decoder_0')", False, 'import os\n'), ((729, 44, 729, 81), 'os.path.join', 'os.path.join', ({(729, 57, 729, 67): 'checkpoint', (729, 69, 729, 80): '"""decoder_1"""'}, {}), "(checkpoint, 'decoder_1')", False, 'import os\n'), ((733, 20, 735, 13), 'transformers.Model2Models.from_pretrained', 'Model2Models.from_pretrained', (), '', False, 'from transformers import AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Models\n'), ((731, 30, 731, 99), 'transformers.BertForMaskedLM.from_pretrained', 'BertForMaskedLM.from_pretrained', ({(731, 62, 
731, 98): 'decoder_checkpoint_question_varibles'}, {}), '(decoder_checkpoint_question_varibles)', False, 'from transformers import AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Models\n'), ((732, 30, 732, 92), 'transformers.BertForMaskedLM.from_pretrained', 'BertForMaskedLM.from_pretrained', ({(732, 62, 732, 91): 'decoder_checkpoint_conditions'}, {}), '(decoder_checkpoint_conditions)', False, 'from transformers import AutoTokenizer, BertForMaskedLM, BertConfig, PreTrainedEncoderDecoder, Model2Models\n')] |
MTES-MCT/envergo | envergo/geodata/management/commands/import_shapefiles.py | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | from django.contrib.gis.gdal import DataSource
from django.contrib.gis.utils import LayerMapping
from django.core.management.base import BaseCommand
from envergo.geodata.models import Zone
class Command(BaseCommand):
help = "Importe des zones à partir de shapefiles."
def add_arguments(self, parser):
parser.add_argument("shapefile", type=str)
def handle(self, *args, **options):
shapefile = options["shapefile"]
ds = DataSource(shapefile)
mapping = {"code": "CODEZONE", "polygon": "POLYGON"}
lm = LayerMapping(Zone, ds, mapping)
self.stdout.write(self.style.SUCCESS("Importing"))
lm.save(verbose=True)
| [((16, 13, 16, 34), 'django.contrib.gis.gdal.DataSource', 'DataSource', ({(16, 24, 16, 33): 'shapefile'}, {}), '(shapefile)', False, 'from django.contrib.gis.gdal import DataSource\n'), ((18, 13, 18, 44), 'django.contrib.gis.utils.LayerMapping', 'LayerMapping', ({(18, 26, 18, 30): 'Zone', (18, 32, 18, 34): 'ds', (18, 36, 18, 43): 'mapping'}, {}), '(Zone, ds, mapping)', False, 'from django.contrib.gis.utils import LayerMapping\n')] |
duanqiaobb/vim-for-java | vimfiles/bundle/ultisnips/test/test_AnonymousExpansion.py | 01b60e4494e65a73c9a9de00f50259d8a7c8d0bb | from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Anonymous Expansion {{{#
class _AnonBase(_VimTest):
args = ''
def _extra_options_pre_init(self, vim_config):
vim_config.append('inoremap <silent> %s <C-R>=UltiSnips#Anon(%s)<cr>'
% (EA, self.args))
class Anon_NoTrigger_Simple(_AnonBase):
args = '"simple expand"'
keys = 'abc' + EA
wanted = 'abcsimple expand'
class Anon_NoTrigger_AfterSpace(_AnonBase):
args = '"simple expand"'
keys = 'abc ' + EA
wanted = 'abc simple expand'
class Anon_NoTrigger_BeginningOfLine(_AnonBase):
args = r"':latex:\`$1\`$0'"
keys = EA + 'Hello' + JF + 'World'
wanted = ':latex:`Hello`World'
class Anon_NoTrigger_FirstCharOfLine(_AnonBase):
args = r"':latex:\`$1\`$0'"
keys = ' ' + EA + 'Hello' + JF + 'World'
wanted = ' :latex:`Hello`World'
class Anon_NoTrigger_Multi(_AnonBase):
args = '"simple $1 expand $1 $0"'
keys = 'abc' + EA + '123' + JF + '456'
wanted = 'abcsimple 123 expand 123 456'
class Anon_Trigger_Multi(_AnonBase):
args = '"simple $1 expand $1 $0", "abc"'
keys = '123 abc' + EA + '123' + JF + '456'
wanted = '123 simple 123 expand 123 456'
class Anon_Trigger_Simple(_AnonBase):
args = '"simple expand", "abc"'
keys = 'abc' + EA
wanted = 'simple expand'
class Anon_Trigger_Twice(_AnonBase):
args = '"simple expand", "abc"'
keys = 'abc' + EA + '\nabc' + EX
wanted = 'simple expand\nabc' + EX
class Anon_Trigger_Opts(_AnonBase):
args = '"simple expand", ".*abc", "desc", "r"'
keys = 'blah blah abc' + EA
wanted = 'simple expand'
# End: Anonymous Expansion #}}}
| [] |
jkchen2/JshBot-plugins | data_converter/data_converter.py | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | import discord
from jshbot import utilities, data, configurations, plugins, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('0.3 to 0.4 plugin')
@plugins.command_spawner
def get_commands(bot):
return [Command('convertdata', hidden=True, elevated_level=3)]
async def get_response(bot, context):
for guild in bot.guilds:
convert_core(bot, guild)
if 'tags.py' in bot.plugins:
convert_tags(bot, guild)
return Response("Converted.")
def convert_core(bot, guild):
if data.get(bot, 'core', None, guild_id=guild.id):
logger.warn("Guild %s (%s) already had core converted", guild.name, guild.id)
return
base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
if 'disabled' in base_data:
# TODO: Iterate through toggled commands
pass
if 'blocked' in base_data:
replacement = []
for entry in base_data['blocked']:
replacement.append(int(entry))
base_data['blocked'] = replacement
if 'muted_channels' in base_data:
replacement = []
for entry in base_data['muted_channels']:
replacement.append(int(entry))
base_data['muted_channels'] = replacement
if 'moderators' in base_data:
del base_data['moderators']
if base_data:
for key, value in base_data.items():
data.add(bot, 'core', key, value, guild_id=guild.id)
data.remove(bot, 'base', None, guild_id=guild.id)
def convert_tags(bot, guild):
if not data.get(bot, 'tags.py', 'tags', guild_id=guild.id):
logger.warn("Guild %s (%s) already had tags converted", guild.name, guild.id)
return
tags = data.get(bot, 'tags.py', 'tags', guild_id=guild.id, default={})
add_tag = bot.plugins['tags.py']._add_tag
#key,value,length,volume,name,flags,author,hits,created,last_used,last_used_by,complex,extra
for key, tag in tags.items():
to_insert = [
key, # key
tag['value'], # value
tag['length'], # length
tag['volume'], # volume
tag['name'], # name
tag['flags'], # flags
int(tag['author']), # author
tag['hits'], # hits
int(tag['created']), # created
int(tag['last_used']), # last_used
None, # last_used_by
{}, # complex
{} # extra
]
add_tag(bot, to_insert, guild.id)
data.remove(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)
| [((9, 14, 9, 57), 'jshbot.exceptions.ConfiguredBotException', 'ConfiguredBotException', ({(9, 37, 9, 56): '"""0.3 to 0.4 plugin"""'}, {}), "('0.3 to 0.4 plugin')", False, 'from jshbot.exceptions import BotException, ConfiguredBotException\n'), ((20, 11, 20, 33), 'jshbot.commands.Response', 'Response', ({(20, 20, 20, 32): '"""Converted."""'}, {}), "('Converted.')", False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response\n'), ((24, 7, 24, 53), 'jshbot.data.get', 'data.get', (), '', False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((27, 16, 27, 74), 'jshbot.data.get', 'data.get', (), '', False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((54, 11, 54, 74), 'jshbot.data.get', 'data.get', (), '', False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((74, 4, 74, 69), 'jshbot.data.remove', 'data.remove', (), '', False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((13, 12, 13, 65), 'jshbot.commands.Command', 'Command', (), '', False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response\n'), ((25, 8, 25, 85), 'jshbot.logger.warn', 'logger.warn', ({(25, 20, 25, 62): '"""Guild %s (%s) already had core converted"""', (25, 64, 25, 74): 'guild.name', (25, 76, 25, 84): 'guild.id'}, {}), "('Guild %s (%s) already had core converted', guild.name, guild.id)", False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((46, 8, 46, 57), 'jshbot.data.remove', 'data.remove', (), '', False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((50, 11, 50, 62), 'jshbot.data.get', 'data.get', (), '', False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((51, 8, 51, 85), 'jshbot.logger.warn', 'logger.warn', ({(51, 20, 51, 62): '"""Guild %s (%s) already had tags converted"""', (51, 64, 51, 74): 'guild.name', (51, 76, 51, 84): 'guild.id'}, {}), "('Guild %s (%s) already had tags converted', guild.name, guild.id)", False, 'from jshbot import utilities, data, configurations, plugins, logger\n'), ((45, 12, 45, 64), 'jshbot.data.add', 'data.add', (), '', False, 'from jshbot import utilities, data, configurations, plugins, logger\n')] |
ankit98040/TKINTER-JIS | tut2.py | 8b650138bf8ab2449da83e910ee33c0caee69a8d | from tkinter import *
from PIL import Image, ImageTk
# PIL is the Python Imaging Library (Pillow)
# ImageTk is needed because Tkinter's PhotoImage cannot display JPEG images on its own
a1 = Tk()
a1.geometry("455x244")
#for png image
#photo = PhotoImage(file="filename.png")
#a2 = Label(image = photo)
#a2.pack()
image = Image.open("PJXlVd.jpg")
photo = ImageTk.PhotoImage(image)
a2 = Label(image = photo)
a2.pack()
a1.mainloop() | [((15, 8, 15, 32), 'PIL.Image.open', 'Image.open', ({(15, 19, 15, 31): '"""PJXlVd.jpg"""'}, {}), "('PJXlVd.jpg')", False, 'from PIL import Image, ImageTk\n'), ((16, 8, 16, 33), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ({(16, 27, 16, 32): 'image'}, {}), '(image)', False, 'from PIL import Image, ImageTk\n')] |
mintanwei/IPCLs-Net | dataset.py | 04937df683216a090c0749cc90ab7e517dbab0fd | import os
import torch
from PIL import Image
from read_csv import csv_to_label_and_bbx
import numpy as np
from torch.utils.data import Subset, random_split, ConcatDataset
class NBIDataset(object):
def __init__(self, root, transforms, nob3=False):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations.csv"), nob3)
def __getitem__(self, idx):
img_path = os.path.join(self.root, "images", self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
class NBINewDataset(object):
def __init__(self, root, transforms, train=True):
self.root = root
self.transforms = transforms
if train:
self.path = os.path.join(root, "train")
else:
self.path = os.path.join(root, "test")
self.imgs = list(sorted(os.listdir(self.path)))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations_all.csv"), img_names=self.imgs)
def __getitem__(self, idx):
img_path = os.path.join(self.path, self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
class NBIFullDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
self.path = os.path.join(root, "all")
self.imgs = list(sorted(os.listdir(self.path)))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations.csv"), img_names=self.imgs)
def __getitem__(self, idx):
img_path = os.path.join(self.path, self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
class NBIDenseDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))
def __getitem__(self, idx):
img_path = os.path.join(self.root, "images", self.imgs[idx])
img = Image.open(img_path).convert("RGB")
density_path = os.path.join(self.root, "density_maps")
density_map = np.load(os.path.join(density_path, self.imgs[idx][:-4] + ".npy"))
density_map = torch.from_numpy(density_map)
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, density_map
def __len__(self):
return len(self.imgs)
class NBIPatchDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = [x for x in list(sorted(os.listdir(root))) if x[-3:] == "png"]
self.ans = np.load(os.path.join(root, "ans.npy"), allow_pickle=True).item()
def __getitem__(self, idx):
# img_path = os.path.join(self.root, "images", self.imgs[idx])
# img = Image.open(img_path).convert("RGB")
# density_path = os.path.join(self.root, "density_maps")
# density_map = np.load(os.path.join(density_path, self.imgs[idx][:-4] + ".npy"))
# density_map = torch.from_numpy(density_map)
#
# if self.transforms is not None:
# img = self.transforms(img)
# # target = self.transforms(target)
return self.imgs[idx]
def __len__(self):
return len(self.imgs)
def split_index(K=5, len=100):
idx = list(range(len))
final_list = []
for i in range(K):
final_list.append(idx[(i*len)//K:((i+1)*len)//K])
return final_list
def k_fold_index(K=5, len=100, fold=0):
split = split_index(K, len)
val = split[fold]
train = []
for i in range(K):
if i != fold:
train = train + split[i]
return train, val
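# Quick sanity check of the two helpers above (editorial example, not in the original script):
#   split_index(K=5, len=10)          -> [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
#   k_fold_index(K=5, len=10, fold=0) -> train=[2, 3, 4, 5, 6, 7, 8, 9], val=[0, 1]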
def stat_dataset(dataset):
class_ids = {1: "A", 2: "B1", 3: "B2", 4: "B3"}
stats = {"A": 0, "B1": 0, "B2": 0, "B3": 0}
for img, target in dataset:
for k in target['labels']:
stats[class_ids[int(k)]] += 1
print(stats)
def NBIFiveFoldDataset(transforms):
ds = NBIFullDataset(root="./NBI_full_dataset/", transforms=transforms)
# n = len(ds)
# for i in range(5):
# train_idx, val_idx = k_fold_index(5, n, i)
# train_subset = Subset(ds, train_idx)
# val_subset = Subset(ds, val_idx)
# print("Fold: %d" % i, len(train_subset), len(val_subset))
# stat_dataset(train_subset)
# stat_dataset(val_subset)
torch.manual_seed(13)
all_subsets = random_split(ds, [46, 46, 46, 45, 45])
fold_i_subsets = []
for i in range(5):
val_subset = all_subsets[i]
train_subset = ConcatDataset([all_subsets[j] for j in range(5) if j != i])
fold_i_subsets.append({"train": train_subset, "val": val_subset})
# print("Fold: %d" % i, len(train_subset), len(val_subset))
# stat_dataset(train_subset)
# stat_dataset(val_subset)
return fold_i_subsets
if __name__ == '__main__':
# ds = NBIFiveFoldDataset(None)
di = "aaa".encode("UTF-8")
result = eval(di)
print(result)
| [((233, 4, 233, 25), 'torch.manual_seed', 'torch.manual_seed', ({(233, 22, 233, 24): '(13)'}, {}), '(13)', False, 'import torch\n'), ((234, 18, 234, 56), 'torch.utils.data.random_split', 'random_split', ({(234, 31, 234, 33): 'ds', (234, 35, 234, 55): '[46, 46, 46, 45, 45]'}, {}), '(ds, [46, 46, 46, 45, 45])', False, 'from torch.utils.data import Subset, random_split, ConcatDataset\n'), ((18, 19, 18, 68), 'os.path.join', 'os.path.join', ({(18, 32, 18, 41): 'self.root', (18, 43, 18, 51): '"""images"""', (18, 53, 18, 67): 'self.imgs[idx]'}, {}), "(self.root, 'images', self.imgs[idx])", False, 'import os\n'), ((26, 16, 26, 59), 'torch.as_tensor', 'torch.as_tensor', (), '', False, 'import torch\n'), ((29, 17, 29, 59), 'torch.as_tensor', 'torch.as_tensor', (), '', False, 'import torch\n'), ((31, 19, 31, 38), 'torch.tensor', 'torch.tensor', ({(31, 32, 31, 37): '[idx]'}, {}), '([idx])', False, 'import torch\n'), ((68, 19, 68, 58), 'os.path.join', 'os.path.join', ({(68, 32, 68, 41): 'self.path', (68, 43, 68, 57): 'self.imgs[idx]'}, {}), '(self.path, self.imgs[idx])', False, 'import os\n'), ((76, 16, 76, 59), 'torch.as_tensor', 'torch.as_tensor', (), '', False, 'import torch\n'), ((79, 17, 79, 59), 'torch.as_tensor', 'torch.as_tensor', (), '', False, 'import torch\n'), ((81, 19, 81, 38), 'torch.tensor', 'torch.tensor', ({(81, 32, 81, 37): '[idx]'}, {}), '([idx])', False, 'import torch\n'), ((107, 20, 107, 45), 'os.path.join', 'os.path.join', ({(107, 33, 107, 37): 'root', (107, 39, 107, 44): '"""all"""'}, {}), "(root, 'all')", False, 'import os\n'), ((112, 19, 112, 58), 'os.path.join', 'os.path.join', ({(112, 32, 112, 41): 'self.path', (112, 43, 112, 57): 'self.imgs[idx]'}, {}), '(self.path, self.imgs[idx])', False, 'import os\n'), ((120, 16, 120, 59), 'torch.as_tensor', 'torch.as_tensor', (), '', False, 'import torch\n'), ((123, 17, 123, 59), 'torch.as_tensor', 'torch.as_tensor', (), '', False, 'import torch\n'), ((125, 19, 125, 38), 'torch.tensor', 'torch.tensor', ({(125, 32, 125, 37): '[idx]'}, {}), '([idx])', False, 'import torch\n'), ((155, 19, 155, 68), 'os.path.join', 'os.path.join', ({(155, 32, 155, 41): 'self.root', (155, 43, 155, 51): '"""images"""', (155, 53, 155, 67): 'self.imgs[idx]'}, {}), "(self.root, 'images', self.imgs[idx])", False, 'import os\n'), ((157, 23, 157, 62), 'os.path.join', 'os.path.join', ({(157, 36, 157, 45): 'self.root', (157, 47, 157, 61): '"""density_maps"""'}, {}), "(self.root, 'density_maps')", False, 'import os\n'), ((159, 22, 159, 51), 'torch.from_numpy', 'torch.from_numpy', ({(159, 39, 159, 50): 'density_map'}, {}), '(density_map)', False, 'import torch\n'), ((15, 42, 15, 84), 'os.path.join', 'os.path.join', ({(15, 55, 15, 64): 'self.root', (15, 66, 15, 83): '"""annotations.csv"""'}, {}), "(self.root, 'annotations.csv')", False, 'import os\n'), ((58, 24, 58, 51), 'os.path.join', 'os.path.join', ({(58, 37, 58, 41): 'root', (58, 43, 58, 50): '"""train"""'}, {}), "(root, 'train')", False, 'import os\n'), ((60, 24, 60, 50), 'os.path.join', 'os.path.join', ({(60, 37, 60, 41): 'root', (60, 43, 60, 49): '"""test"""'}, {}), "(root, 'test')", False, 'import os\n'), ((64, 42, 64, 88), 'os.path.join', 'os.path.join', ({(64, 55, 64, 64): 'self.root', (64, 66, 64, 87): '"""annotations_all.csv"""'}, {}), "(self.root, 'annotations_all.csv')", False, 'import os\n'), ((109, 42, 109, 84), 'os.path.join', 'os.path.join', ({(109, 55, 109, 64): 'self.root', (109, 66, 109, 83): '"""annotations.csv"""'}, {}), "(self.root, 'annotations.csv')", False, 'import os\n'), ((158, 30, 158, 
86), 'os.path.join', 'os.path.join', ({(158, 43, 158, 55): 'density_path', (158, 57, 158, 85): "self.imgs[idx][:-4] + '.npy'"}, {}), "(density_path, self.imgs[idx][:-4] + '.npy')", False, 'import os\n'), ((19, 14, 19, 34), 'PIL.Image.open', 'Image.open', ({(19, 25, 19, 33): 'img_path'}, {}), '(img_path)', False, 'from PIL import Image\n'), ((62, 32, 62, 53), 'os.listdir', 'os.listdir', ({(62, 43, 62, 52): 'self.path'}, {}), '(self.path)', False, 'import os\n'), ((69, 14, 69, 34), 'PIL.Image.open', 'Image.open', ({(69, 25, 69, 33): 'img_path'}, {}), '(img_path)', False, 'from PIL import Image\n'), ((108, 32, 108, 53), 'os.listdir', 'os.listdir', ({(108, 43, 108, 52): 'self.path'}, {}), '(self.path)', False, 'import os\n'), ((113, 14, 113, 34), 'PIL.Image.open', 'Image.open', ({(113, 25, 113, 33): 'img_path'}, {}), '(img_path)', False, 'from PIL import Image\n'), ((156, 14, 156, 34), 'PIL.Image.open', 'Image.open', ({(156, 25, 156, 33): 'img_path'}, {}), '(img_path)', False, 'from PIL import Image\n'), ((14, 43, 14, 71), 'os.path.join', 'os.path.join', ({(14, 56, 14, 60): 'root', (14, 62, 14, 70): '"""images"""'}, {}), "(root, 'images')", False, 'import os\n'), ((152, 43, 152, 71), 'os.path.join', 'os.path.join', ({(152, 56, 152, 60): 'root', (152, 62, 152, 70): '"""images"""'}, {}), "(root, 'images')", False, 'import os\n'), ((177, 27, 177, 56), 'os.path.join', 'os.path.join', ({(177, 40, 177, 44): 'root', (177, 46, 177, 55): '"""ans.npy"""'}, {}), "(root, 'ans.npy')", False, 'import os\n'), ((176, 44, 176, 60), 'os.listdir', 'os.listdir', ({(176, 55, 176, 59): 'root'}, {}), '(root)', False, 'import os\n')] |
FeliciaMJ/PythonLearningJourney | design_patterns/chapter5/mymath.py | ae1bfac872ee29256e69df6e0e8e507321404cba | # coding: utf-8
import functools
def memoize(fn):
known = dict()
@functools.wraps(fn)
def memoizer(*args):
if args not in known:
known[args] = fn(*args)
return known[args]
return memoizer
@memoize
def nsum(n):
'''返回前n个数字的和'''
assert(n >= 0), 'n must be >= 0'
return 0 if n == 0 else n + nsum(n-1)
@memoize
def fibonacci(n):
'''返回斐波那契数列的第n个数'''
assert(n >= 0), 'n must be >= 0'
return n if n in (0, 1) else fibonacci(n-1) + fibonacci(n-2)
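# Editorial note: with @memoize each value is cached after its first computation, so
# fibonacci(100) below only needs on the order of 100 recursive evaluations instead of
# the astronomically many calls the plain double recursion would otherwise make.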
if __name__ == '__main__':
from timeit import Timer
measure = [{'exec': 'fibonacci(100)', 'import': 'fibonacci',
'func': fibonacci}, {'exec': 'nsum(200)', 'import': 'nsum',
'func': nsum}]
for m in measure:
t = Timer('{}'.format(m['exec']), 'from __main__ import \
{}'.format(m['import']))
print('name: {}, doc: {}, executing: {}, time: \
{}'.format(m['func'].__name__, m['func'].__doc__,
m['exec'], t.timeit()))
| [((9, 5, 9, 24), 'functools.wraps', 'functools.wraps', ({(9, 21, 9, 23): 'fn'}, {}), '(fn)', False, 'import functools\n')] |
yangyuke001/emotion-expression.shufflenetv2 | transforms/__init__.py | d70fd17871fb758eb4fc7d2f9df430cc7e44ad64 |
from .transforms import *
| [] |
adRenaud/research | codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py | 2f0062a1800d7a17577bbfc2393b084253d567f4 | # !\usr\bin\python
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import scipy.optimize
from matplotlib import animation
from scipy.integrate import ode
import pdb
# Material parameters
rho = 7800.
E = 2.e11
nu = 0.3
mu = 0.5*E/(1.+nu)
kappa = E/(3.*(1.-2.*nu))
lamb = kappa-2.*mu/3.
sigy = 100.0e6
H = 100.08e6
beta=(6.*mu**2)/(3.*mu+H)
def tangentModulus(sigma,lamb,mu,beta,tangent):
H=np.zeros((3,3))
# |H1111 H1112 H1122|
# H =|H1211 H1212 H1222|
# |H2211 H2212 H2222|
# sigma = [sig11 , sig12 , sig22 , sig33 ]
sigDev = computeDeviatoricPart(sigma)
sigdnorm2=np.dot(sigDev,sigDev)
BETA=beta/sigdnorm2
s11=sigDev[0];s12=sigDev[1]/np.sqrt(2.);s22=sigDev[2];s33=sigDev[3]
## Plane stress tangent modulus Hijkl = Hijkl - Hij33*H33kl/H3333
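    ## (Editorial sketch) The condensation above comes from enforcing the plane stress
    ## condition dSig33 = 0: from dSig33 = H3333*dEps33 + H33kl*dEpskl = 0 one gets
    ## dEps33 = -H33kl*dEpskl/H3333, and substituting back into dSigij = Hijkl*dEpskl
    ## yields Hijkl_ps = Hijkl - Hij33*H33kl/H3333, which is what the terms below implement.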
H1133=(lamb -BETA*s11*s33)
H1233=(-BETA*s12*s33)
H1122=(lamb -BETA*s11*s22)
H2222=(lamb+2.*mu -BETA*s22**2)
H1222=(-BETA*s12*s22)
H2233=(lamb-BETA*s22*s33)
H3333=(lamb+2.*mu-BETA*s33*s33)
if tangent=='planeStress':
H[0,0]=lamb+2.*mu - BETA*s11**2 -H1133*H1133/H3333
H[0,1]=-BETA*s11*s12 -H1133*H1233/H3333
H[0,2]=lamb-BETA*s11*s22 -H1133*H2233/H3333
H[1,0]=-BETA*s12*s11-H1233*H1133/H3333
H[1,1]=mu-BETA*s12**2 -H1233*H1233/H3333
H[1,2]=-BETA*s12*s22-H1233*H2233/H3333
H[2,0]=lamb - BETA*s11*s22 -H2233*H1133/H3333
H[2,1]=-BETA*s22*s12 -H2233*H1233/H3333
H[2,2]=lamb+2.*mu-BETA*s22**2 -H2233*H2233/H3333
elif tangent=='thinWalled':
H[0,0]=lamb+2.*mu - BETA*s11**2 -H1122*(H1122+H1133)/(H2233+H2222)
H[0,1]=-BETA*s11*s12 -H1222*(H1122+H1133)/(H2233+H2222)
H[0,2]=lamb-BETA*s11*s22
H[1,0]=-BETA*s12*s11-H1122*(H1222+H1233)/(H2233+H2222)
H[1,1]=mu-BETA*s12**2-H1222*(H1222+H1233)/(H2233+H2222)
H[1,2]=-BETA*s12*s22
H[2,0]=lamb - BETA*s11*s22
H[2,1]=-BETA*s22*s12
H[2,2]=lamb+2.*mu-BETA*s22**2
else :
H[0,0]=lamb+2.*mu - BETA*s11**2
H[0,1]=-BETA*s11*s12
H[0,2]=lamb-BETA*s11*s12
H[1,0]=-BETA*s12*s11
H[1,1]=mu-BETA*s12**2
H[1,2]=-BETA*s12*s22
H[2,0]=lamb-BETA*s11*s22
H[2,1]=-BETA*s12*s22
H[2,2]=lamb+2.*mu-BETA*s22**2
return H
def acousticTensor(H,n):
n1=n[0] ; n2=n[1]
C11 = H[0,0]*n1**2 + H[1,1]*n2**2 + 2.*H[0,1]*n1*n2
C12 = H[0,1]*n1**2 + H[1,2]*n2**2 + (H[0,2]+H[1,1])*n1*n2
C22 = H[1,1]*n1**2 + H[2,2]*n2**2 + 2.*H[2,1]*n1*n2
return np.array([C11,C12,C22])
def acousticEigenStructure(C):
C11=C[0];C12=C[1];C22=C[2]
## omega1,w1 associated to cf
## omega2,w2 associated to cs
omega1=0.5*(C11+C22 + np.sqrt((C11-C22)**2+4.*C12**2))
omega2=0.5*(C11+C22 - np.sqrt((C11-C22)**2+4.*C12**2))
w1=np.array([-C12,C11-omega1])
w2=np.array([-C12,C11-omega2])
return [omega1,w1],[omega2,w2]
def vonMisesYieldSurface(sigy):
radius=np.sqrt((2./3.)*sigy**2)
theta=np.linspace(0,2*np.pi,50)
s2 = radius*np.cos(theta)
s3 = radius*np.sin(theta)
s1=0.
c=np.sqrt(2.)/2.;
s=np.sqrt(2.)/2.;
P2=np.array([[c,-c,0.],[c,c,0.],[0.,0.,1.]])
P1=np.array([[c,0.,-c],[0.,1.,0.],[c,0.,c]])
c=np.cos(np.arctan(1./np.sqrt(2.0)))
s=np.sin(np.arctan(1./np.sqrt(2.0)))
P1=np.array([[c,0.,-s],[0.,1.,0.],[s,0.,c]])
cylindre=np.zeros((3,len(s2)))
for i in range(len(s2)):
cylindre[:,i] = np.dot(P2,np.dot(P1,np.array([s1,s2[i],s3[i]])))
return cylindre
def computeDeviatoricPart(T):
# T = [T11 T21 T22 T33]
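    # (Editorial note) The sqrt(2) factor on the 12-component below makes np.dot(sDev, sDev)
    # equal to the full tensor contraction s:s = s11^2 + 2*s12^2 + s22^2 + s33^2, which is
    # what computeCriterion and tangentModulus assume when they take this norm.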
Pdev=np.array([[1.-1/3.,0.,-1./3.,-1./3.],[0.,1.,0.,0.],[-1./3.,0.,1.-1./3.,-1./3.],[-1./3.,0.,-1./3.,1.-1./3.]])
Tdev=np.dot(Pdev,T)
return np.array([Tdev[0],np.sqrt(2.)*Tdev[1],Tdev[2],Tdev[3]])
def computeCriterion(sig11,sig22,sig12,sig33,sigy):
# deviatoric stress
sDev=computeDeviatoricPart(np.array([sig11,sig12,sig22,sig33]))
normSDev=np.sqrt(np.dot(sDev,sDev))
f=np.sqrt(3./2.)*normSDev - sigy
return f
def computePsiSlow(sig11,sigma,sig33,lamb,mu,beta,tangent,rho):
# sig11 driven
n1=1.;n2=0.
sig12=sigma[0];sig22=sigma[1]
H=tangentModulus(np.array([sig11,sig12,sig22,sig33]),lamb,mu,beta,tangent)
C=acousticTensor(H,np.array([n1,n2]))
eigenf,eigens=acousticEigenStructure(C)
alpha11=H[0,1]*H[1,2]- H[1,1]*H[0,2]
alpha12=-H[0,1]*H[0,2]-H[0,0]*H[2,1]
alpha22=H[0,0]*H[1,1]-H[0,1]**2
w1=eigenf[1][0];w2=eigenf[1][1]
psi12=-2.*w1/w2
psi22=(2.*w1*alpha12/w2-alpha11)/alpha22
"""
n1=1.;n2=0.
JN=-np.array([[0.,0.,n1/rho,n2/rho,0.],[0.,0.,0.,n1/rho,n2/rho],[H[0,0]*n1+H[0,1]*n2,H[0,1]*n1+H[0,2]*n2,0.,0.,0.],[H[0,1]*n1+H[1,1]*n2,H[1,1]*n1+H[1,2]*n2,0,0,0],[H[2,0]*n1+H[2,1]*n2,H[2,1]*n1+H[2,2]*n2,0,0,0]])
eigenStructure=np.linalg.eig(JN.T)
contact=np.where(eigenStructure[0]==0)[0][0]
cfplus=np.where(eigenStructure[0]==np.max(eigenStructure[0]))[0][0]
cfminus=np.where(eigenStructure[0]==np.min(eigenStructure[0]))[0][0]
index=np.ones(5);index[[contact,cfminus,cfplus]]-=1
cs=np.where(index!=0.)[0]
csminus=np.where(eigenStructure[0]==np.min(eigenStructure[0][cs]))[0][0]
csplus=np.where(eigenStructure[0]==np.max(eigenStructure[0][cs]))[0][0]
lcfminus=eigenStructure[1][:,cfminus];lcfplus=eigenStructure[1][:,cfplus]
lcontact=eigenStructure[1][:,contact]
dl=lcfminus-lcfplus
if not (dl[4]!=0. and dl[0]!=0. and dl[1]!=0.):
psi12=-dl[2]/dl[3]
if not (lcontact[0]>1.e-6 and lcontact[1]>1.e-6):
psi22=(lcontact[3]*(dl[2]/dl[3])-lcontact[2])/lcontact[4]
"""
return np.array([psi12,psi22])
def computeLodeAngle(sig11,sig22,sig12,sig33):
# deviatoric stress
sDev=computeDeviatoricPart(np.array([sig11,sig12,sig22,sig33]))
s11=sDev[0];s12=sDev[1]/np.sqrt(2.);s22=sDev[2];s33=sDev[3]
# deviator 2nd and 3rd invariants
J3=s33*(s11*s22-s12**2) ; sqrtJ2=np.sqrt(0.5*np.dot(sDev,sDev))
theta=np.arccos((3./2.)*np.sqrt(3.)*J3/(sqrtJ2**3))/3.
theta=theta*360./(2.*np.pi)
return theta
def updateEquivalentPlasticStrain(sig,sign,H):
# sig=[sig11^n , sqrt(2)*sig12^n , sig22 , sig33^n]
# sign=[sig11^n+1 , sqrt(2)*sig12^n+1 , sig22 , sig33^n+1]
sigDev=computeDeviatoricPart(np.array([sign[0],sign[1]/np.sqrt(2.),sign[2],sign[3]]))
norm=np.sqrt(np.dot(sigDev,sigDev))
flow=sigDev/norm
dSig=sign-sig
dp=(1./H)*np.sqrt(3./2.)*np.dot(flow,dSig)
return dp
def plasticResidual(sig,sign,p,pn,H):
# sig=[sig11^n , sqrt(2)*sig12^n , sig22 , sig33^n]
# sign=[sig11^n+1 , sqrt(2)*sig12^n+1 , sig22 , sig33^n+1]
sigDev=computeDeviatoricPart(np.array([sign[0],sign[1]/np.sqrt(2.),sign[2],sign[3]]))
norm=np.sqrt(np.dot(sigDev,sigDev))
flow=sigDev/norm
dSig=sign-sig
dp=(1./H)*np.sqrt(3./2.)*np.dot(flow,dSig)
res=pn-p-dp
return res
def computeEigenStresses(sig):
# | sig11 sig12 0 |
#sig=| sig12 sig22 0 |
# | 0 0 sig33 |
s3=sig[2,2]
delta=(sig[0,0]-sig[1,1])**2+4.*sig[0,1]**2
s1=0.5*(sig[0,0]+sig[1,1]-np.sqrt(delta))
s2=0.5*(sig[0,0]+sig[1,1]+np.sqrt(delta))
return np.array([s1,s2,s3])
from mpl_toolkits.mplot3d import proj3d
def orthogonal_proj(zfront, zback):
a = (zfront+zback)/(zfront-zback)
b = -2*(zfront*zback)/(zfront-zback)
return np.array([[1,0,0,0],
[0,1,0,0],
[0,0,a,b],
[0,0,0,zback]])
proj3d.persp_transformation = orthogonal_proj
Samples=5
# Sample constant stress component sig22
sig22=np.linspace(0.,sigy,Samples)
#sig22=np.linspace(-sigy/np.sqrt(1-nu+nu**2),sigy/np.sqrt(1-nu+nu**2),Samples)
Samples*=10
sig=np.zeros((Samples,Samples))
tau=np.zeros((Samples,Samples))
frames=[10,20,40]
frames=[5,10,15,20]
col=["r","g","b","y","c","m","k","p"]
tauM=1.5*sigy/np.sqrt(3.)
sigM=1.5*sigy/np.sqrt(1-nu+nu**2)
tauM=sigM
Niter=1000
TAU=np.zeros((Niter,len(frames),len(sig22)))
SIG11=np.zeros((Niter,len(frames),len(sig22)))
SIG22=np.zeros((Niter,len(frames),len(sig22)))
eigsigS=np.zeros((Niter,len(frames),len(sig22),3))
criterionS=np.zeros((Niter,len(frames)))
PsiS=np.zeros((Samples,len(sig22)))
plast_S=np.zeros((Niter,len(frames)))
LodeAngle_S=np.zeros((Niter,len(frames)))
# Boolean to plot the updated yield surface
updated_criterion=False
for k in range(len(sig22)-1):
s22=sig22[k]
Delta=(4.*sigy**2- 3.*s22**2)
sigMax=(s22+np.sqrt(Delta))/2.
sigMin=(s22-np.sqrt(Delta))/2.
# Sample stress component sig11
sig[:,k]=np.linspace(sigMin,sigMax,Samples)
sig[:,k]=np.linspace(0.,sigMax,Samples)
# Compute shear stress satisfying the criterion given sig11 and sig22
for i in range(Samples):
s11=sig[i,k]
delta=(s11*s22 -s11**2-s22**2 + sigy**2)/3.
if np.abs(delta)<10. : delta=np.abs(delta)
tauMax=np.sqrt(delta)
f_vm=lambda x:computeCriterion(s11,s22,x,0.,sigy)
tau[i,k]=np.sqrt(delta)
## LOADING PATHS PLOTS
for k in range(len(sig22)-1)[1:]:
s22=sig22[k]
sigM=1.25*np.max(sig[:,k])
tauM=1.25*np.max(tau[:,k])
## For each value of sig22 trace the loading paths given by psis from yield surface to an arbitrary shear stress level
approx=np.zeros((len(frames),2))
ordonnees=np.zeros((len(frames),Samples))
abscisses=np.zeros((len(frames),Samples))
radius_S=np.zeros(len(frames))
for s,i in enumerate(frames):
if i==0:
continue
sig0=sig[-1-i,k]
tau0=tau[-1-i,k]
dsig=(sigM-sig0)/Niter
SIG11[:,s,k]=np.linspace(sig0,sigM,Niter)
TAU[0,s,k]=tau0
SIG22[0,s,k]=s22
#rSlow = ode(computePsiSlow).set_integrator('vode',method='bdf')
rSlow = ode(computePsiSlow).set_integrator('vode',method='adams',order=12)
rSlow.set_initial_value(np.array([TAU[0,s,k],SIG22[0,s,k]]),SIG11[0,s,k]).set_f_params(0.,lamb,mu,beta,'planeStress',rho)
sigma = np.matrix([[SIG11[0,s,k],TAU[0,s,k],0.],[TAU[0,s,k],SIG22[0,s,k],0.],[0.,0.,0.]])
eigsig=np.linalg.eig(sigma)[0]
eigsigS[0,s,k,:]=eigsig
LodeAngle_S[0,s]=computeLodeAngle(sigma[0,0],SIG22[0,s,k],sigma[0,1],0.)
p=0.
epsp33=0.
for j in range(Niter-1):
rSlow.set_f_params(np.array([TAU[j,s,k],SIG22[j,s,k]]),0.,lamb,mu,beta,'planeStress',rho)
if not rSlow.successful():
print "Integration issues in slow wave path"
break
rSlow.integrate(rSlow.t+dsig)
TAU[j+1,s,k],SIG22[j+1,s,k]=rSlow.y
sigma = np.array([SIG11[j,s,k],np.sqrt(2.)*TAU[j,s,k],SIG22[j,s,k],0.])
sigman = np.array([SIG11[j+1,s,k],np.sqrt(2.)*TAU[j+1,s,k],SIG22[j+1,s,k],0.])
f_vm=computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*p)
#if f_vm>0. :
#p+=updateEquivalentPlasticStrain(sigma,sigman,H)
#residual=lambda x: plasticResidual(sigma,sigman,p,x,H)
residual=lambda x: computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*x)
p=scipy.optimize.root(residual,p,method='hybr',options={'xtol':1.e-12}).x[0]
criterionS[j+1,s]=computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*p)
plast_S[j+1,s]=p
LodeAngle_S[j+1,s]=computeLodeAngle(sigman[0],sigman[2],sigman[1]/np.sqrt(2.),0.)
# Eigenvalues of sigma (for deviatoric plane plots)
sigma = np.matrix([[SIG11[j+1,s,k],TAU[j+1,s,k],0.],[TAU[j+1,s,k],SIG22[j+1,s,k],0.],[0.,0.,0.]])
eigsigS[j+1,s,k,:]=computeEigenStresses(sigma)
print "Final equivalent plastic strain after slow wave : ",p
radius_S[s]=sigy+H*p
TAU_MAX_S=np.max(ordonnees)
SIG_MAX_S=np.max(abscisses)
### SUBPLOTS SETTINGS
fig = plt.figure()
ax2=plt.subplot2grid((1,2),(0,1),projection='3d')
ax1d1=plt.subplot2grid((1,2),(0,0))
ax1d1.grid()
ax1d1.set_xlabel(r'$\Theta$', fontsize=24)
ax1d1.set_ylabel('p', fontsize=24)
fvm1=ax1d1.twinx()
fvm1.set_ylabel('f',fontsize=18.)
fvm1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
cylindre=vonMisesYieldSurface(sigy)
ax2.plot_wireframe(cylindre[0,:],cylindre[1,:],cylindre[2,:], color="k")
elevation_Angle_radian=np.arctan(1./np.sqrt(2.0))
angle_degree= 180.*elevation_Angle_radian/np.pi
radius=1.*np.sqrt((2./3.)*sigy**2)
ax2.set_xlim(-1.*radius,1.*radius)
ax2.set_ylim(-1.*radius,1.*radius)
ax2.set_zlim(-1.*radius,1.*radius)
ax2.view_init(angle_degree,45.)
ax2.plot([0.,sigy],[0.,sigy],[0.,sigy],color="k")
ax2.set_xlabel(r'$\sigma_1$',size=24.)
ax2.set_ylabel(r'$\sigma_2$',size=24.)
ax2.set_zlabel(r'$\sigma_3$',size=24.)
for p in range(len(frames)):
if updated_criterion :
cylindre=vonMisesYieldSurface(radius_S[p])
ax2.plot_wireframe(cylindre[0,:],cylindre[1,:],cylindre[2,:], color=col[p],linestyle='--')
## 2D plot of equivalent plastic strain evolution
    ax1d1.plot(LodeAngle_S[:Niter//5,p],plast_S[:Niter//5,p],col[p])
#ax1d1_2.plot(LodeAngle_S[:Niter/5,p],SIG33_S[:Niter/5,p,k],col[p],marker='o')
fvm1.plot(LodeAngle_S[:,p],criterionS[:,p],col[p],linestyle='--')
## 3D plots of loading paths (deviatoric plane)
ax2.plot(eigsigS[:,p,k,0],eigsigS[:,p,k,1],eigsigS[:,p,k,2],color=col[p],marker="o")
ax2.plot([-sigy,sigy],[0.,0.],[0.,0.],color="k",linestyle="--",lw=1.)
ax2.plot([0.,0.],[-sigy,sigy],[0.,0.],color="k",linestyle="--",lw=1.)
ax2.plot([-radius,radius],[radius,-radius],[0.,0.],color="k",linestyle="--",lw=1.)
#plt.show()
fig = plt.figure()
ax1=plt.subplot2grid((1,2),(0,0))
ax2=plt.subplot2grid((1,2),(0,1))
ax1.set_xlabel(r'$\sigma_{11}$',size=28.)
ax1.set_ylabel(r'$\sigma_{12}$',size=28.)
#ax1.set_zlabel(r'$\sigma_{22}$',size=28.)
ax2.set_xlabel(r'$\sigma_{22}$',size=28.)
ax2.set_ylabel(r'$\sigma_{12}$',size=28.)
#ax2.set_zlabel(r'$\sigma_{11}$',size=28.)
ax1.grid()
ax2.grid()
#ax2.view_init(-90.,-0.)
#ax1.view_init(-90.,0.)
for s,i in enumerate(frames):
sig0=sig[-1-i,k]
s22max=(sig0+np.sqrt(4*sigy**2-3.*sig0**2))/2.
s22min=(sig0-np.sqrt(4*sigy**2-3.*sig0**2))/2.
s22=np.linspace(s22min,s22max,Samples)
s12=np.sqrt((sigy**2- sig0**2-s22**2+sig0*s22)/3.)
ax2.plot(s22,s12,color=col[s])
ax1.plot(sig[:,k],tau[:,k],'k')
#ax2.plot(sig[:,k],tau[:,k],sig22[k],'k')
for p in range(len(frames)):
ax1.plot(SIG11[:,p,k],TAU[:,p,k],color=col[p])
ax2.plot(SIG22[:,p,k],TAU[:,p,k],color=col[p])
plt.show()
| [] |
cherish-web/pyhsms | pyhsms/core/connectionstate.py | 83a88b8b45bf1aba30cb7572f44a02478009052b | # _*_ coding: utf-8 _*_
#@Time : 2020/7/29 09:49 AM
#@Author : cherish_peng
#@Email : [email protected]
#@File : connectionstate.py
#@Software : PyCharm
from enum import Enum
class ConnectionState(Enum):
'''
ConnectionState enum
'''
DisConnected = 0
    Connecting = 1
    Connected = 2
    Selected = 3
Retry=4 | [] |
msanpe/lifelines | lifelines/fitters/coxph_fitter.py | a73d441f6347332ca870bf2ec32eeeca410dc6de | # -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import pandas as pd
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio
from lifelines.utils import (
_get_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_cdf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_pandas,
CensoringType,
interpolate_at_times,
format_p_value,
)
__all__ = ["CoxPHFitter"]
class BatchVsSingle:
@staticmethod
def decide(batch_mode, n_unique, n_total, n_vars):
frac_dups = n_unique / n_total
if batch_mode or (
# https://github.com/CamDavidsonPilon/lifelines/issues/591 for original issue.
# new values from from perf/batch_vs_single script.
(batch_mode is None)
and (
(
6.876218e-01
+ -1.796993e-06 * n_total
+ -1.204271e-11 * n_total ** 2
+ 1.912500e00 * frac_dups
+ -8.121036e-01 * frac_dups ** 2
+ 4.916605e-06 * n_total * frac_dups
+ -5.888875e-03 * n_vars
+ 5.473434e-09 * n_vars * n_total
)
< 1
)
):
return "batch"
return "single"
class CoxPHFitter(BaseFitter):
r"""
This class implements fitting Cox's proportional hazard model:
.. math:: h(t|x) = h_0(t) \exp((x - \overline{x})' \beta)
Parameters
----------
alpha: float, optional (default=0.05)
the level in the confidence intervals.
tie_method: string, optional
specify how the fitter should deal with ties. Currently only
'Efron' is available.
penalizer: float, optional (default=0.0)
Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`\beta_i`.
The penalty is :math:`\frac{1}{2} \text{penalizer} ||\beta||^2`.
strata: list, optional
specify a list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
Examples
--------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter()
>>> cph.fit(rossi, 'week', 'arrest')
>>> cph.print_summary()
Attributes
----------
params_ : Series
        The estimated coefficients. Changed in version 0.22.0: used to be ``.hazards_``
hazard_ratios_ : Series
The exp(coefficients)
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the hazard coefficients
durations: Series
The durations provided
event_observed: Series
The event_observed variable provided
weights: Series
The event_observed variable provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Series
the standard errors of the estimates
score_: float
the concordance index of the model.
baseline_hazard_: DataFrame
baseline_cumulative_hazard_: DataFrame
baseline_survival_: DataFrame
"""
_KNOWN_MODEL = True
def __init__(self, alpha=0.05, tie_method="Efron", penalizer=0.0, strata=None):
super(CoxPHFitter, self).__init__(alpha=alpha)
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != "Efron":
raise NotImplementedError("Only Efron is available at the moment.")
self.alpha = alpha
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = strata
@CensoringType.right_censoring
def fit(
self,
df,
duration_col=None,
event_col=None,
show_progress=False,
initial_point=None,
strata=None,
step_size=None,
weights_col=None,
cluster_col=None,
robust=False,
batch_mode=None,
):
"""
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
            the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
            the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
            specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
            ties, so if there is a high number of ties, results may significantly differ. See
            "The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074-1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
"""
if duration_col is None:
raise TypeError("duration_col cannot be None.")
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self._batch_mode = batch_mode
self.strata = coalesce(strata, self.strata)
X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.weights.index = original_index
self._norm_mean = X.mean(0)
self._norm_std = X.std(0)
X_norm = normalize(X, self._norm_mean, self._norm_std)
params_ = self._fit_model(
X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
)
self.params_ = pd.Series(params_, index=X.columns, name="coef") / self._norm_std
self.hazard_ratios_ = pd.Series(np.exp(self.params_), index=X.columns, name="exp(coef)")
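        # (Editorial note) params_ above is the normalized-covariate estimate rescaled by 1/std,
        # so the covariance matrix -inv(H) of the normalized fit is rescaled by 1/(std_i * std_j) below.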
self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
self.confidence_intervals_ = self._compute_confidence_intervals()
self._predicted_partial_hazards_ = (
self.predict_partial_hazard(X)
.rename(columns={0: "P"})
.assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
.set_index(X.index)
)
self.baseline_hazard_ = self._compute_baseline_hazards()
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
self.baseline_survival_ = self._compute_baseline_survival()
if hasattr(self, "_concordance_score_"):
# we have already fit the model.
del self._concordance_score_
return self
def _preprocess_dataframe(self, df):
# this should be a pure function
df = df.copy()
if self.strata is not None:
df = df.sort_values(by=_to_list(self.strata) + [self.duration_col])
original_index = df.index.copy()
df = df.set_index(self.strata)
else:
df = df.sort_values(by=self.duration_col)
original_index = df.index.copy()
# Extract time and event
T = df.pop(self.duration_col)
E = (
df.pop(self.event_col)
if (self.event_col is not None)
else pd.Series(np.ones(self._n_examples), index=df.index, name="E")
)
W = (
df.pop(self.weights_col)
if (self.weights_col is not None)
else pd.Series(np.ones((self._n_examples,)), index=df.index, name="weights")
)
_clusters = df.pop(self.cluster_col).values if self.cluster_col else None
X = df.astype(float)
T = T.astype(float)
# we check nans here because converting to bools maps NaNs to True..
check_nans_or_infs(E)
E = E.astype(bool)
self._check_values(X, T, E, W)
return X, T, E, W, original_index, _clusters
def _check_values(self, X, T, E, W):
check_for_numeric_dtypes_or_raise(X)
check_nans_or_infs(T)
check_nans_or_infs(X)
check_low_var(X)
check_complete_separation(X, E, T, self.event_col)
# check to make sure their weights are okay
if self.weights_col:
if (W.astype(int) != W).any() and not self.robust:
warnings.warn(
"""It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
""",
StatisticalWarning,
)
if (W <= 0).any():
raise ValueError("values in weight column %s must be positive." % self.weights_col)
def _fit_model(
self,
X,
T,
E,
weights=None,
initial_point=None,
step_size=None,
precision=1e-07,
show_progress=True,
max_steps=50,
): # pylint: disable=too-many-statements,too-many-branches
"""
        Newton-Raphson algorithm for fitting the CPH model.
Note
----
The data is assumed to be sorted on T!
Parameters
----------
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
weights: (n) an iterable representing weights per observation.
initial_point: (d,) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float, optional
> 0.001 to determine a starting step size in NR algorithm.
precision: float, optional
the convergence halts if the norm of delta between
successive positions is less than epsilon.
show_progress: boolean, optional
since the fitter is iterative, show convergence
diagnostics.
max_steps: int, optional
            the maximum number of iterations of the Newton-Raphson algorithm.
Returns
-------
beta: (1,d) numpy array.
"""
self.path = []
assert precision <= 1.0, "precision must be less than or equal to 1."
_, d = X.shape
# make sure betas are correct size.
if initial_point is not None:
assert initial_point.shape == (d,)
beta = initial_point
else:
beta = np.zeros((d,))
step_sizer = StepSizer(step_size)
step_size = step_sizer.next()
# Method of choice is just efron right now
if self.tie_method == "Efron":
decision = BatchVsSingle.decide(self._batch_mode, T.nunique(), X.shape[0], X.shape[1])
get_gradients = getattr(self, "_get_efron_values_%s" % decision)
self._batch_mode = decision == "batch"
else:
raise NotImplementedError("Only Efron is available.")
i = 0
converging = True
ll, previous_ll = 0, 0
start = time.time()
while converging:
self.path.append(beta.copy())
i += 1
if self.strata is None:
h, g, ll = get_gradients(X.values, T.values, E.values, weights.values, beta)
else:
g = np.zeros_like(beta)
h = np.zeros((beta.shape[0], beta.shape[0]))
ll = 0
for _h, _g, _ll in self._partition_by_strata_and_apply(X, T, E, weights, get_gradients, beta):
g += _g
h += _h
ll += _ll
if i == 1 and np.all(beta == 0):
# this is a neat optimization, the null partial likelihood
# is the same as the full partial but evaluated at zero.
# if the user supplied a non-trivial initial point, we need to delay this.
self._ll_null_ = ll
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta
h.flat[:: d + 1] -= self.penalizer
# reusing a piece to make g * inv(h) * g.T faster later
try:
inv_h_dot_g_T = spsolve(-h, g, assume_a="pos", check_finite=False)
except ValueError as e:
if "infs or NaNs" in str(e):
raise ConvergenceError(
"""Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
else:
# something else?
raise e
except LinAlgError as e:
raise ConvergenceError(
"""Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
delta = inv_h_dot_g_T
if np.any(np.isnan(delta)):
raise ConvergenceError(
"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
"""
)
# Save these as pending result
hessian, gradient = h, g
norm_delta = norm(delta)
# reusing an above piece to make g * inv(h) * g.T faster.
newton_decrement = g.dot(inv_h_dot_g_T) / 2
if show_progress:
print(
"\rIteration %d: norm_delta = %.5f, step_size = %.4f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
% (i, norm_delta, step_size, ll, newton_decrement, time.time() - start),
end="",
)
# convergence criteria
if norm_delta < precision:
converging, completed = False, True
elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
# this is what R uses by default
converging, completed = False, True
elif newton_decrement < precision:
converging, completed = False, True
elif i >= max_steps:
# 50 iterations steps with N-R is a lot.
# Expected convergence is ~10 steps
converging, completed = False, False
elif step_size <= 0.00001:
converging, completed = False, False
elif abs(ll) < 0.0001 and norm_delta > 1.0:
warnings.warn(
"The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/q/11109/11867 for more.\n",
ConvergenceWarning,
)
converging, completed = False, False
beta += step_size * delta
previous_ll = ll
step_size = step_sizer.update(norm_delta).next()
self._hessian_ = hessian
self._score_ = gradient
self.log_likelihood_ = ll
if show_progress and completed:
print("Convergence completed after %d iterations." % (i))
elif show_progress and not completed:
print("Convergence failed. See any warning messages.")
# report to the user problems that we detect.
if completed and norm_delta > 0.1:
warnings.warn(
"Newton-Rhaphson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is collinearity or complete separation in the dataset?\n"
% norm_delta,
ConvergenceWarning,
)
elif not completed:
warnings.warn(
"Newton-Rhaphson failed to converge sufficiently in %d steps.\n" % max_steps, ConvergenceWarning
)
return beta
def _get_efron_values_single(self, X, T, E, weights, beta):
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
        A good way to explain the Efron approximation: consider three of five subjects who fail at the same time.
        Since it is not known a priori which of the three fails first, one-third of
        (φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure. Similarly, two-thirds
        of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc. A small numeric
        sketch of this adjustment is given in the comments just below this docstring.
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters
----------
X: array
(n,d) numpy array of observations.
T: array
(n) numpy array representing observed durations.
E: array
(n) numpy array representing death events.
weights: array
(n) an array representing weights per observation.
beta: array
(1, d) numpy array of coefficients.
Returns
-------
hessian:
(d, d) numpy array,
gradient:
(1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# Init risk and tie sums to zero
x_death_sum = np.zeros((d,))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
scores = weights * np.exp(np.dot(X, beta))
phi_x_is = scores[:, None] * X
phi_x_x_i = np.empty((d, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i]
w = weights[i]
# Calculate phi values
phi_i = scores[i]
phi_x_i = phi_x_is[i]
# https://stackoverflow.com/a/51481295/1895939
phi_x_x_i = np.multiply.outer(xi, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_death_sum = x_death_sum + w * xi
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
tie_phi_x_x = tie_phi_x_x + phi_x_x_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
            # There was at least one event and no more ties remain. Time to sum.
# This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
# reset tie values
tied_death_counts = 0
weight_count = 0.0
x_death_sum = np.zeros((d,))
tie_phi = 0
tie_phi_x = np.zeros((d,))
tie_phi_x_x = np.zeros((d, d))
return hessian, gradient, log_lik
@staticmethod
def _trivial_log_likelihood_batch(T, E, weights):
# used for log-likelihood test
n = T.shape[0]
log_lik = 0
_, counts = np.unique(-T, return_counts=True)
risk_phi = 0
pos = n
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
weights_at_t = weights[slice_]
phi_i = weights_at_t
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
# Calculate the sums of Tie set
deaths = E[slice_]
tied_death_counts = deaths.astype(int).sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
weights_deaths = weights_at_t[deaths]
weight_count = weights_deaths.sum()
if tied_death_counts > 1:
tie_phi = phi_i[deaths].sum()
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
pos -= count_of_removals
return log_lik
@staticmethod
def _trivial_log_likelihood_single(T, E, weights):
# assumes sorted on T!
log_lik = 0
n = T.shape[0]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
# Calculate phi values
phi_i = weights[i]
w = weights[i]
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
# Calculate sums of Ties, if this is an event
if ei:
tie_phi = tie_phi + phi_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
if tied_death_counts > 1:
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
# reset tie values
tied_death_counts = 0
weight_count = 0.0
tie_phi = 0
return log_lik
def _get_efron_values_batch(self, X, T, E, weights, beta): # pylint: disable=too-many-locals
"""
Assumes sorted on ascending on T
Calculates the first and second order vector differentials, with respect to beta.
A good explanation of how Efron handles ties: consider three out of five subjects who fail at the same time.
Since it is not known a priori which of the three fails first, one-third of
(φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj in the risk-set denominator after the first failure, two-thirds of
(φ1 + φ2 + φ3) is subtracted after the first two failures, and so on.
Returns
-------
hessian: (d, d) numpy array,
gradient: (1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# weights = weights[:, None]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# counts are sorted by -T
_, counts = np.unique(-T, return_counts=True)
scores = weights * np.exp(np.dot(X, beta))
pos = n
ZERO_TO_N = np.arange(counts.max())
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
X_at_t = X[slice_]
weights_at_t = weights[slice_]
deaths = E[slice_]
phi_i = scores[slice_, None]
phi_x_i = phi_i * X_at_t
phi_x_x_i = np.dot(X_at_t.T, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
risk_phi_x = risk_phi_x + (phi_x_i).sum(0)
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate the sums of Tie set
tied_death_counts = deaths.sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
"""
I think there is another optimization that can be made if we sort on
T and E. Using some accounting, we can skip all the [death] indexing below.
"""
xi_deaths = X_at_t[deaths]
weights_deaths = weights_at_t[deaths]
x_death_sum = np.einsum("a,ab->b", weights_deaths, xi_deaths)
weight_count = weights_deaths.sum()
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
# a lot of this is now in Einstein notation for performance, but see original "expanded" code here
# https://github.com/CamDavidsonPilon/lifelines/blob/e7056e7817272eb5dff5983556954f56c33301b1/lifelines/fitters/coxph_fitter.py#L755-L789
# it's faster if we can skip computing these when we don't need to.
phi_x_i_deaths = phi_x_i[deaths]
tie_phi = phi_i[deaths].sum()
tie_phi_x = (phi_x_i_deaths).sum(0)
tie_phi_x_x = np.dot(xi_deaths.T, phi_x_i_deaths)
increasing_proportion = ZERO_TO_N[:tied_death_counts] / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
# computes outer products and sums them together.
# Naive approach is to
# 1) broadcast tie_phi_x_x and increasing_proportion into a (tied_death_counts, d, d) matrix
# 2) broadcast risk_phi_x_x and denom into a (tied_death_counts, d, d) matrix
# 3) subtract them, and then sum to (d, d)
# Alternatively, we can sum earlier without having to explicitly create (_, d, d) matrices. This is used here.
#
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
# no tensors here, but do some casting to make it easier in the converging step next.
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
# This is a batch outer product.
# given a matrix t, for each row, m, compute its outer product: m.dot(m.T), and stack these new matrices together.
# which would be: np.einsum("Bi, Bj->Bij", t, t)
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
pos -= count_of_removals
return hessian, gradient, log_lik
def _partition_by_strata(self, X, T, E, weights, as_dataframes=False):
for stratum, stratified_X in X.groupby(self.strata):
stratified_E, stratified_T, stratified_W = (E.loc[[stratum]], T.loc[[stratum]], weights.loc[[stratum]])
if not as_dataframes:
yield (stratified_X.values, stratified_T.values, stratified_E.values, stratified_W.values), stratum
else:
yield (stratified_X, stratified_T, stratified_E, stratified_W), stratum
def _partition_by_strata_and_apply(self, X, T, E, weights, function, *args):
for (stratified_X, stratified_T, stratified_E, stratified_W), _ in self._partition_by_strata(X, T, E, weights):
yield function(stratified_X, stratified_T, stratified_E, stratified_W, *args)
def _compute_martingale(self, X, T, E, _weights, index=None):
# TODO: _weights unused
partial_hazard = self.predict_partial_hazard(X)[0].values
if not self.strata:
baseline_at_T = self.baseline_cumulative_hazard_.loc[T, "baseline cumulative hazard"].values
else:
baseline_at_T = np.empty(0)
for name, T_ in T.groupby(by=self.strata):
baseline_at_T = np.append(baseline_at_T, self.baseline_cumulative_hazard_[name].loc[T_])
martingale = E - (partial_hazard * baseline_at_T)
return pd.DataFrame(
{self.duration_col: T.values, self.event_col: E.values, "martingale": martingale.values}, index=index
)
def _compute_deviance(self, X, T, E, weights, index=None):
df = self._compute_martingale(X, T, E, weights, index)
rmart = df.pop("martingale")
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore")
log_term = np.where((E.values - rmart.values) <= 0, 0, E.values * np.log(E.values - rmart.values))
deviance = np.sign(rmart) * np.sqrt(-2 * (rmart + log_term))
df["deviance"] = deviance
return df
def _compute_scaled_schoenfeld(self, X, T, E, weights, index=None):
r"""
Let s_k be the kth schoenfeld residual. Then E[s_k] = 0.
For tests of proportionality, we want to test if \beta_i(t) is \beta_i (constant) or not.
Let V_k be the contribution to the information matrix at time t_k. A main result from Grambsch and Therneau is that
\beta(t) = E[s_k*V_k^{-1} + \hat{\beta}]
so define s_k^* = s_k*V_k^{-1} + \hat{\beta} as the scaled schoenfeld residuals.
We can approximate V_k with Hessian/d, so the inverse of Hessian/d is (d * variance_matrix_)
Notes
-------
lifelines does not add the coefficients to the final results, but R does when you call residuals(c, "scaledsch")
"""
n_deaths = self.event_observed.sum()
scaled_schoenfeld_resids = n_deaths * self._compute_schoenfeld(X, T, E, weights, index).dot(
self.variance_matrix_
)
scaled_schoenfeld_resids.columns = self.params_.index
return scaled_schoenfeld_resids
def _compute_schoenfeld(self, X, T, E, weights, index=None):
# TODO: should the index be times, i.e. T[E]?
# Assumes sorted on T and on strata
# cluster does nothing to this, as expected.
_, d = X.shape
if self.strata is not None:
schoenfeld_residuals = np.empty((0, d))
for schoenfeld_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_schoenfeld_within_strata
):
schoenfeld_residuals = np.append(schoenfeld_residuals, schoenfeld_residuals_in_strata, axis=0)
else:
schoenfeld_residuals = self._compute_schoenfeld_within_strata(X.values, T.values, E.values, weights.values)
# schoenfeld residuals are only defined for subjects with a non-zero event.
df = pd.DataFrame(schoenfeld_residuals[E, :], columns=self.params_.index, index=index[E])
return df
def _compute_schoenfeld_within_strata(self, X, T, E, weights):
"""
A positive value of the residual shows an X value that is higher than expected at that death time.
"""
# TODO: the diff_against is gross
# This uses Efron ties.
n, d = X.shape
if not np.any(E):
# sometimes strata have no deaths. This means nothing is returned
# in the below code.
return np.zeros((n, d))
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))
# Init number of ties and weights
weight_count = 0.0
tie_count = 0
scores = weights * np.exp(np.dot(X, self.params_))
diff_against = []
schoenfeld_residuals = np.empty((0, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i : i + 1]
score = scores[i : i + 1]
w = weights[i]
# Calculate phi values
phi_i = score
phi_x_i = phi_i * xi
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
# Calculate sums of Ties, if this is an event
diff_against.append((xi, ei))
if ei:
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
# Keep track of count
tie_count += 1 # aka death counts
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tie_count == 0:
for _ in diff_against:
schoenfeld_residuals = np.append(schoenfeld_residuals, np.zeros((1, d)), axis=0)
diff_against = []
continue
# There was at least one event and no more ties remain. Time to sum.
weighted_mean = np.zeros((1, d))
for l in range(tie_count):
numer = risk_phi_x - l * tie_phi_x / tie_count
denom = risk_phi - l * tie_phi / tie_count
weighted_mean += numer / (denom * tie_count)
for xi, ei in diff_against:
schoenfeld_residuals = np.append(schoenfeld_residuals, ei * (xi - weighted_mean), axis=0)
# reset tie values
tie_count = 0
weight_count = 0.0
tie_phi = 0
tie_phi_x = np.zeros((1, d))
diff_against = []
return schoenfeld_residuals[::-1]
def _compute_delta_beta(self, X, T, E, weights, index=None):
"""
Approximate change in betas as a result of excluding the ith row. Good for finding outliers / specific
subjects that influence the model disproportionately. Good advice: don't drop these outliers, model them.
"""
score_residuals = self._compute_score(X, T, E, weights, index=index)
d = X.shape[1]
scaled_variance_matrix = self.variance_matrix_ * np.tile(self._norm_std.values, (d, 1)).T
delta_betas = score_residuals.dot(scaled_variance_matrix)
delta_betas.columns = self.params_.index
return delta_betas
def _compute_score(self, X, T, E, weights, index=None):
_, d = X.shape
if self.strata is not None:
score_residuals = np.empty((0, d))
for score_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_score_within_strata
):
score_residuals = np.append(score_residuals, score_residuals_in_strata, axis=0)
else:
score_residuals = self._compute_score_within_strata(X.values, T, E.values, weights.values)
return pd.DataFrame(score_residuals, columns=self.params_.index, index=index)
def _compute_score_within_strata(self, X, _T, E, weights):
# https://www.stat.tamu.edu/~carroll/ftp/gk001.pdf
# lin1989
# https://www.ics.uci.edu/~dgillen/STAT255/Handouts/lecture10.pdf
# Assumes X already sorted by T with strata
# TODO: doesn't handle ties.
# TODO: _T unused
n, d = X.shape
# we already unnormalized the betas in `fit`, so we need to normalize them again since X is
# normalized.
beta = self.params_.values * self._norm_std
E = E.astype(int)
score_residuals = np.zeros((n, d))
phi_s = np.exp(np.dot(X, beta))
# need to store these histories, as we access them often
# this is a reverse cumulative sum. See original code in https://github.com/CamDavidsonPilon/lifelines/pull/496/files#diff-81ee0759dbae0770e1a02cf17f4cfbb1R431
risk_phi_x_history = (X * (weights * phi_s)[:, None])[::-1].cumsum(0)[::-1]
risk_phi_history = (weights * phi_s)[::-1].cumsum()[::-1][:, None]
# Iterate forwards
for i in range(0, n):
xi = X[i : i + 1]
phi_i = phi_s[i]
score = -phi_i * (
(
E[: i + 1] * weights[: i + 1] / risk_phi_history[: i + 1].T
).T # this is constant-ish, and could be cached
* (xi - risk_phi_x_history[: i + 1] / risk_phi_history[: i + 1])
).sum(0)
if E[i]:
score = score + (xi - risk_phi_x_history[i] / risk_phi_history[i])
score_residuals[i, :] = score
return score_residuals * weights[:, None]
def compute_residuals(self, training_dataframe, kind):
"""
Parameters
----------
training_dataframe : pandas DataFrame
the same training DataFrame given in `fit`
kind : string
{'schoenfeld', 'score', 'delta_beta', 'deviance', 'martingale', 'scaled_schoenfeld'}
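Examples
---------
A minimal usage sketch, assuming the ``rossi`` dataset and the 'week'/'arrest' columns used in this module's other examples:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> martingale_resids = cph.compute_residuals(rossi, kind='martingale')
>>> deviance_resids = cph.compute_residuals(rossi, kind='deviance')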
"""
ALLOWED_RESIDUALS = {"schoenfeld", "score", "delta_beta", "deviance", "martingale", "scaled_schoenfeld"}
assert kind in ALLOWED_RESIDUALS, "kind must be in %s" % ALLOWED_RESIDUALS
warnings.filterwarnings("ignore", category=ConvergenceWarning)
X, T, E, weights, shuffled_original_index, _ = self._preprocess_dataframe(training_dataframe)
resids = getattr(self, "_compute_%s" % kind)(X, T, E, weights, index=shuffled_original_index)
return resids
def _compute_confidence_intervals(self):
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
se = self.standard_errors_
hazards = self.params_.values
return pd.DataFrame(
np.c_[hazards - z * se, hazards + z * se],
columns=["%g%% lower-bound" % ci, "%g%% upper-bound" % ci],
index=self.params_.index,
)
def _compute_standard_errors(self, X, T, E, weights):
if self.robust or self.cluster_col:
se = np.sqrt(self._compute_sandwich_estimator(X, T, E, weights).diagonal())
else:
se = np.sqrt(self.variance_matrix_.diagonal())
return pd.Series(se, name="se", index=self.params_.index)
def _compute_sandwich_estimator(self, X, T, E, weights):
delta_betas = self._compute_delta_beta(X, T, E, weights)
if self.cluster_col:
delta_betas = delta_betas.groupby(self._clusters).sum()
sandwich_estimator = delta_betas.T.dot(delta_betas)
return sandwich_estimator.values
def _compute_z_values(self):
return self.params_ / self.standard_errors_
def _compute_p_values(self):
U = self._compute_z_values() ** 2
return stats.chi2.sf(U, 1)
@property
def summary(self):
"""Summary statistics describing the fit.
Set alpha property in the object before calling.
Returns
-------
df : DataFrame
Contains columns coef, np.exp(coef), se(coef), z, p, lower, upper"""
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
with np.errstate(invalid="ignore", divide="ignore", over="ignore", under="ignore"):
df = pd.DataFrame(index=self.params_.index)
df["coef"] = self.params_
df["exp(coef)"] = self.hazard_ratios_
df["se(coef)"] = self.standard_errors_
df["coef lower %g%%" % ci] = self.confidence_intervals_["%g%% lower-bound" % ci]
df["coef upper %g%%" % ci] = self.confidence_intervals_["%g%% upper-bound" % ci]
df["exp(coef) lower %g%%" % ci] = self.hazard_ratios_ * np.exp(-z * self.standard_errors_)
df["exp(coef) upper %g%%" % ci] = self.hazard_ratios_ * np.exp(z * self.standard_errors_)
df["z"] = self._compute_z_values()
df["p"] = self._compute_p_values()
df["-log2(p)"] = -np.log2(df["p"])
return df
def print_summary(self, decimals=2, **kwargs):
"""
Print summary statistics describing the fit, the coefficients, and the error bounds.
Parameters
-----------
decimals: int, optional (default=2)
specify the number of decimal places to show
kwargs:
print additional metadata in the output (useful to provide model names, dataset names, etc.) when comparing
multiple outputs.
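Examples
---------
A minimal usage sketch, assuming the ``rossi`` dataset used in this module's other examples; the keyword argument is an arbitrary label echoed in the printed header:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.print_summary(decimals=3, model="cox model on rossi")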
"""
# Print information about data first
justify = string_justify(25)
headers = []
headers.append(("duration col", "'%s'" % self.duration_col))
if self.event_col:
headers.append(("event col", "'%s'" % self.event_col))
if self.weights_col:
headers.append(("weights col", "'%s'" % self.weights_col))
if self.cluster_col:
headers.append(("cluster col", "'%s'" % self.cluster_col))
if self.penalizer > 0:
headers.append(("penalizer", self.penalizer))
if self.robust or self.cluster_col:
headers.append(("robust variance", True))
if self.strata:
headers.append(("strata", self.strata))
headers.extend(
[
("number of observations", "{:g}".format(self.weights.sum())),
("number of events observed", "{:g}".format(self.weights[self.event_observed > 0].sum())),
("partial log-likelihood", "{:.{prec}f}".format(self.log_likelihood_, prec=decimals)),
("time fit was run", self._time_fit_was_called),
]
)
p = Printer(headers, self, justify, decimals, kwargs)
p.print()
def log_likelihood_ratio_test(self):
"""
This function computes the likelihood ratio test for the Cox model. We
compare the existing model (with all the covariates) to the trivial model
of no covariates.
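Example (a sketch assuming the ``rossi`` dataset used in this module's other examples; see ``StatisticalResult`` for the attributes available on the returned object):
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> results = cph.log_likelihood_ratio_test()
>>> results.print_summary()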
"""
if hasattr(self, "_ll_null_"):
ll_null = self._ll_null_
else:
if self._batch_mode:
ll_null = self._trivial_log_likelihood_batch(
self.durations.values, self.event_observed.values, self.weights.values
)
else:
ll_null = self._trivial_log_likelihood_single(
self.durations.values, self.event_observed.values, self.weights.values
)
ll_alt = self.log_likelihood_
test_stat = 2 * ll_alt - 2 * ll_null
degrees_freedom = self.params_.shape[0]
p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom)
return StatisticalResult(
p_value,
test_stat,
name="log-likelihood ratio test",
null_distribution="chi squared",
degrees_freedom=degrees_freedom,
)
def predict_partial_hazard(self, X):
r"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
partial_hazard: DataFrame
Returns the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to :math:`\exp{(x - \text{mean}(x_{\text{train}}))' \beta}`
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
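Examples
---------
A minimal usage sketch, assuming the ``rossi`` dataset used in this module's other examples:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_partial_hazard(rossi.head())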
"""
return np.exp(self.predict_log_partial_hazard(X))
def predict_log_partial_hazard(self, X):
r"""
This is equivalent to R's linear.predictors.
Returns the log of the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to :math:`(x - \text{mean}(x_{\text{train}})) \beta`
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
log_partial_hazard: DataFrame
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
"""
hazard_names = self.params_.index
if isinstance(X, pd.Series) and ((X.shape[0] == len(hazard_names) + 2) or (X.shape[0] == len(hazard_names))):
X = X.to_frame().T
return self.predict_log_partial_hazard(X)
elif isinstance(X, pd.Series):
assert len(hazard_names) == 1, "Series not the correct argument"
X = X.to_frame().T
return self.predict_log_partial_hazard(X)
index = _get_index(X)
if isinstance(X, pd.DataFrame):
order = hazard_names
X = X.reindex(order, axis="columns")
X = X.astype(float)
X = X.values
X = X.astype(float)
X = normalize(X, self._norm_mean.values, 1)
return pd.DataFrame(np.dot(X, self.params_), index=index)
def predict_cumulative_hazard(self, X, times=None, conditional_after=None):
"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. reset back to starting at 0.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
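Examples
---------
A minimal usage sketch, assuming the ``rossi`` dataset used in this module's other examples; the evaluation times are arbitrary:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_cumulative_hazard(rossi.head(), times=[5, 25, 50])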
"""
if isinstance(X, pd.Series):
return self.predict_cumulative_hazard(X.to_frame().T, times=times, conditional_after=conditional_after)
n = X.shape[0]
if times is not None:
times = np.atleast_1d(times).astype(float)
if conditional_after is not None:
conditional_after = _to_1d_array(conditional_after).reshape(n, 1)
if self.strata:
cumulative_hazard_ = pd.DataFrame()
for stratum, stratified_X in X.groupby(self.strata):
try:
strata_c_0 = self.baseline_cumulative_hazard_[[stratum]]
except KeyError:
raise StatError(
dedent(
"""The stratum %s was not found in the original training data. For example, try
the following on the original dataset, df: `df.groupby(%s).size()`. Expected is that %s is not present in the output."""
% (stratum, self.strata, stratum)
)
)
col = _get_index(stratified_X)
v = self.predict_partial_hazard(stratified_X)
times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
n_ = stratified_X.shape[0]
if conditional_after is not None:
times_to_evaluate_at = np.tile(times_, (n_, 1)) + conditional_after
c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at)
c_0_conditional_after = interpolate_at_times(strata_c_0, conditional_after)
c_0_ = np.clip((c_0_ - c_0_conditional_after).T, 0, np.inf)
else:
times_to_evaluate_at = np.tile(times_, (n_, 1))
c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at).T
cumulative_hazard_ = cumulative_hazard_.merge(
pd.DataFrame(c_0_ * v.values[:, 0], columns=col, index=times_),
how="outer",
right_index=True,
left_index=True,
)
else:
v = self.predict_partial_hazard(X)
col = _get_index(v)
times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
if conditional_after is not None:
times_to_evaluate_at = np.tile(times_, (n, 1)) + conditional_after
c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at)
c_0_conditional_after = interpolate_at_times(self.baseline_cumulative_hazard_, conditional_after)
c_0 = np.clip((c_0 - c_0_conditional_after).T, 0, np.inf)
else:
times_to_evaluate_at = np.tile(times_, (n, 1))
c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at).T
cumulative_hazard_ = pd.DataFrame(c_0 * v.values[:, 0], columns=col, index=times_)
return cumulative_hazard_
def predict_survival_function(self, X, times=None, conditional_after=None):
"""
Predict the survival function for individuals, given their covariates. This assumes that the individual
just entered the study (that is, we do not condition on how long they have already lived for.)
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
survival_function : DataFrame
the survival probabilities of individuals over the timeline
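Examples
---------
A minimal usage sketch, assuming the ``rossi`` dataset used in this module's other examples; the evaluation times are arbitrary:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> surv = cph.predict_survival_function(rossi.head(), times=[5, 25, 50])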
"""
return np.exp(-self.predict_cumulative_hazard(X, times=times, conditional_after=conditional_after))
def predict_percentile(self, X, p=0.5, conditional_after=None):
"""
Returns the median lifetimes for the individuals, by default. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
p: float, optional (default=0.5)
the percentile, must be between 0 and 1.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
percentiles: DataFrame
See Also
--------
predict_median
"""
subjects = _get_index(X)
return qth_survival_times(p, self.predict_survival_function(X, conditional_after=conditional_after)[subjects]).T
def predict_median(self, X, conditional_after=None):
"""
Predict the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
percentiles: DataFrame
the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
See Also
--------
predict_percentile
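Examples
---------
A minimal usage sketch, assuming the ``rossi`` dataset used in this module's other examples:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_median(rossi.head())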
"""
return self.predict_percentile(X, 0.5, conditional_after=conditional_after)
def predict_expectation(self, X):
r"""
Compute the expected lifetime, :math:`E[T]`, using covariates X. The expectation is computed
using the fact that :math:`E[T] = \int_0^\infty P(T > t) dt = \int_0^\infty S(t) dt`, and the integral is approximated with the trapezoidal rule.
Caution
--------
However, if the survival function doesn't converge to 0, then the expectation is really infinity and the returned
values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
expectations : DataFrame
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
See Also
--------
predict_median
predict_percentile
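Examples
---------
A minimal usage sketch, assuming the ``rossi`` dataset used in this module's other examples:
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_expectation(rossi.head())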
"""
subjects = _get_index(X)
v = self.predict_survival_function(X)[subjects]
return pd.DataFrame(trapz(v.values.T, v.index), index=subjects)
def _compute_baseline_hazard(self, partial_hazards, name):
# https://stats.stackexchange.com/questions/46532/cox-baseline-hazard
ind_hazards = partial_hazards.copy()
ind_hazards["P"] *= ind_hazards["W"]
ind_hazards["E"] *= ind_hazards["W"]
ind_hazards_summed_over_durations = ind_hazards.groupby("T")[["P", "E"]].sum()
ind_hazards_summed_over_durations["P"] = ind_hazards_summed_over_durations["P"].loc[::-1].cumsum()
baseline_hazard = pd.DataFrame(
ind_hazards_summed_over_durations["E"] / ind_hazards_summed_over_durations["P"], columns=[name]
)
baseline_hazard.index.name = None
return baseline_hazard
def _compute_baseline_hazards(self):
if self.strata:
index = self.durations.unique()
baseline_hazards_ = pd.DataFrame(index=index).sort_index()
for name, stratum_predicted_partial_hazards_ in self._predicted_partial_hazards_.groupby(self.strata):
baseline_hazards_ = baseline_hazards_.merge(
self._compute_baseline_hazard(stratum_predicted_partial_hazards_, name),
left_index=True,
right_index=True,
how="left",
)
return baseline_hazards_.fillna(0)
return self._compute_baseline_hazard(self._predicted_partial_hazards_, name="baseline hazard")
def _compute_baseline_cumulative_hazard(self):
cumulative = self.baseline_hazard_.cumsum()
if not self.strata:
cumulative = cumulative.rename(columns={"baseline hazard": "baseline cumulative hazard"})
return cumulative
def _compute_baseline_survival(self):
"""
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax)
"""
survival_df = np.exp(-self.baseline_cumulative_hazard_)
if not self.strata:
survival_df = survival_df.rename(columns={"baseline cumulative hazard": "baseline survival"})
return survival_df
def plot(self, columns=None, hazard_ratios=False, ax=None, **errorbar_kwargs):
"""
Produces a visual representation of the coefficients (i.e. log hazard ratios), including their standard errors and magnitudes.
Parameters
----------
columns : list, optional
specify a subset of the columns to plot
hazard_ratios: bool, optional
by default, `plot` will present the log-hazard ratios (the coefficients). However, by turning this flag to True, the hazard ratios are presented instead.
errorbar_kwargs:
pass in additional plotting commands to matplotlib errorbar command
Examples
---------
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.plot(hazard_ratios=True)
Returns
-------
ax: matplotlib axis
the matplotlib axis that can be edited.
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
errorbar_kwargs.setdefault("c", "k")
errorbar_kwargs.setdefault("fmt", "s")
errorbar_kwargs.setdefault("markerfacecolor", "white")
errorbar_kwargs.setdefault("markeredgewidth", 1.25)
errorbar_kwargs.setdefault("elinewidth", 1.25)
errorbar_kwargs.setdefault("capsize", 3)
z = inv_normal_cdf(1 - self.alpha / 2)
user_supplied_columns = True
if columns is None:
user_supplied_columns = False
columns = self.params_.index
yaxis_locations = list(range(len(columns)))
log_hazards = self.params_.loc[columns].values.copy()
order = list(range(len(columns) - 1, -1, -1)) if user_supplied_columns else np.argsort(log_hazards)
if hazard_ratios:
exp_log_hazards = np.exp(log_hazards)
upper_errors = exp_log_hazards * (np.exp(z * self.standard_errors_[columns].values) - 1)
lower_errors = exp_log_hazards * (1 - np.exp(-z * self.standard_errors_[columns].values))
ax.errorbar(
exp_log_hazards[order],
yaxis_locations,
xerr=np.vstack([lower_errors[order], upper_errors[order]]),
**errorbar_kwargs
)
ax.set_xlabel("HR (%g%% CI)" % ((1 - self.alpha) * 100))
else:
symmetric_errors = z * self.standard_errors_[columns].values
ax.errorbar(log_hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs)
ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100))
best_ylim = ax.get_ylim()
ax.vlines(1 if hazard_ratios else 0, -2, len(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65)
ax.set_ylim(best_ylim)
tick_labels = [columns[i] for i in order]
ax.set_yticks(yaxis_locations)
ax.set_yticklabels(tick_labels)
return ax
def plot_covariate_groups(self, covariates, values, plot_baseline=True, **kwargs):
"""
Produces a plot comparing the baseline survival curve of the model versus
what happens when a covariate(s) is varied over values in a group. This is useful to compare
subjects' survival as we vary covariate(s), all else being held equal. The baseline survival
curve is the predicted survival curve at the average values of the covariates in the original dataset.
Parameters
----------
covariates: string or list
a string (or list of strings) of the covariate(s) in the original dataset that we wish to vary.
values: 1d or 2d iterable
an iterable of the specific values we wish the covariate(s) to take on.
plot_baseline: bool
also display the baseline survival, defined as the survival at the mean of the original dataset.
kwargs:
pass in additional plotting commands.
Returns
-------
ax: matplotlib axis, or list of axis'
the matplotlib axis that can be edited.
Examples
---------
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.plot_covariate_groups('prio', values=np.arange(0, 15, 3), cmap='coolwarm')
.. image:: images/plot_covariate_example1.png
>>> # multiple variables at once
>>> cph.plot_covariate_groups(['prio', 'paro'], values=[
>>> [0, 0],
>>> [5, 0],
>>> [10, 0],
>>> [0, 1],
>>> [5, 1],
>>> [10, 1]
>>> ], cmap='coolwarm')
.. image:: images/plot_covariate_example2.png
>>> # if you have categorical variables, you can do the following to see the
>>> # effect of all the categories on one plot.
>>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> # same as:
>>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=np.eye(3))
"""
from matplotlib import pyplot as plt
covariates = _to_list(covariates)
n_covariates = len(covariates)
values = np.asarray(values)
if len(values.shape) == 1:
values = values[None, :].T
if n_covariates != values.shape[1]:
raise ValueError("The number of covariates must equal to second dimension of the values array.")
for covariate in covariates:
if covariate not in self.params_.index:
raise KeyError("covariate `%s` is not present in the original dataset" % covariate)
set_kwargs_drawstyle(kwargs, "steps-post")
if self.strata is None:
axes = kwargs.pop("ax", None) or plt.figure().add_subplot(111)
x_bar = self._norm_mean.to_frame().T
X = pd.concat([x_bar] * values.shape[0])
if np.array_equal(np.eye(n_covariates), values):
X.index = ["%s=1" % c for c in covariates]
else:
X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
for covariate, value in zip(covariates, values.T):
X[covariate] = value
self.predict_survival_function(X).plot(ax=axes, **kwargs)
if plot_baseline:
self.baseline_survival_.plot(ax=axes, ls=":", color="k", drawstyle="steps-post")
else:
axes = []
for stratum, baseline_survival_ in self.baseline_survival_.iteritems():
ax = plt.figure().add_subplot(1, 1, 1)
x_bar = self._norm_mean.to_frame().T
for name, value in zip(_to_list(self.strata), _to_tuple(stratum)):
x_bar[name] = value
X = pd.concat([x_bar] * values.shape[0])
if np.array_equal(np.eye(len(covariates)), values):
X.index = ["%s=1" % c for c in covariates]
else:
X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
for covariate, value in zip(covariates, values.T):
X[covariate] = value
self.predict_survival_function(X).plot(ax=ax, **kwargs)
if plot_baseline:
baseline_survival_.plot(
ax=ax, ls=":", label="stratum %s baseline survival" % str(stratum), drawstyle="steps-post"
)
plt.legend()
axes.append(ax)
return axes
def check_assumptions(
self, training_df, advice=True, show_plots=False, p_value_threshold=0.01, plot_n_bootstraps=10, columns=None
):
"""
Use this function to test the proportional hazards assumption. See usage example at
https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
Parameters
-----------
training_df: DataFrame
the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
advice: boolean, optional
display advice as output to the user's screen
show_plots: boolean, optional
display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.
This will slow down the function significantly.
p_value_threshold: float, optional
the threshold to use to alert the user of violations. See note below.
plot_n_bootstraps: int, optional (default=10)
in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
the function significantly.
columns: list, optional
specify a subset of columns to test.
Examples
----------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>>
>>> rossi = load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>>
>>> cph.check_assumptions(rossi)
Notes
-------
The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
Similarly, when there are lots of observations, even minor deviances from the proportional hazard
assumption will be flagged.
With that in mind, it's best to use a combination of statistical tests and eyeball tests to
determine the most serious violations.
References
-----------
section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
"""
if not training_df.index.is_unique:
raise IndexError(
"`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index"
)
residuals = self.compute_residuals(training_df, kind="scaled_schoenfeld")
test_results = proportional_hazard_test(
self, training_df, time_transform=["rank", "km"], precomputed_residuals=residuals
)
residuals_and_duration = residuals.join(training_df[self.duration_col])
counter = 0
n = residuals_and_duration.shape[0]
for variable in self.params_.index.intersection(columns or self.params_.index):
minimum_observed_p_value = test_results.summary.loc[variable, "p"].min()
if np.round(minimum_observed_p_value, 2) > p_value_threshold:
continue
counter += 1
if counter == 1:
if advice:
print(
fill(
"""The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged."""
% p_value_threshold,
width=100,
)
)
print()
print(
fill(
"""With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
width=100,
)
)
print()
test_results.print_summary()
print()
print()
print(
"%d. Variable '%s' failed the non-proportional test: p-value is %s."
% (counter, variable, format_p_value(4)(minimum_observed_p_value)),
end="\n\n",
)
if advice:
values = training_df[variable]
value_counts = values.value_counts()
n_uniques = value_counts.shape[0]
# Arbitrarily chosen thresholds (at most 10 unique values, each appearing at least 5 times) to check whether
# the variable could be used as a strata col. This should capture dichotomous / low cardinality values.
if n_uniques <= 10 and value_counts.min() >= 5:
print(
fill(
" Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(
n_uniques, variable
),
width=100,
)
)
else:
print(
fill(
""" Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""",
width=100,
),
end="\n\n",
)
if show_plots:
from matplotlib import pyplot as plt
fig = plt.figure()
# plot variable against all time transformations.
for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
p_value = test_results.summary.loc[(variable, transform_name), "p"]
ax = fig.add_subplot(1, 2, i)
y = residuals_and_duration[variable]
tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
ax.scatter(tt, y, alpha=0.75)
y_lowess = lowess(tt.values, y.values)
ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
# bootstrap some possible other lowess lines. This gives a rough visual sense of the variability of the lowess fit
for _ in range(plot_n_bootstraps):
ix = sorted(np.random.choice(n, n))
tt_ = tt.values[ix]
y_lowess = lowess(tt_, y.values[ix])
ax.plot(tt_, y_lowess, color="k", alpha=0.30)
best_xlim = ax.get_xlim()
ax.hlines(0, 0, tt.max(), linestyles="dashed", linewidths=1)
ax.set_xlim(best_xlim)
ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
plt.tight_layout()
plt.subplots_adjust(top=0.90)
if advice and counter > 0:
print(
dedent(
r"""
---
[A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
[B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
[C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
[D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
[E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
"""
)
)
if counter == 0:
print("Proportional hazard assumption looks okay.")
@property
def score_(self):
"""
The concordance score (also known as the c-index) of the fit. The c-index is a generalization of the ROC AUC
to survival data, including censorships.
For this purpose, the ``score_`` is a measure of the predictive accuracy of the fitted model
on the training dataset.
References
----------
https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
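Example (a sketch assuming the ``rossi`` dataset used in this module's other examples):
>>> from lifelines import datasets, CoxPHFitter
>>> cph = CoxPHFitter().fit(datasets.load_rossi(), 'week', 'arrest')
>>> cph.score_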
"""
# pylint: disable=access-member-before-definition
if not hasattr(self, "_concordance_score_"):
if self.strata:
# https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
num_correct, num_tied, num_pairs = 0, 0, 0
for _, _df in self._predicted_partial_hazards_.groupby(self.strata):
if _df.shape[0] == 1:
continue
_num_correct, _num_tied, _num_pairs = _concordance_summary_statistics(
_df["T"].values, -_df["P"].values, _df["E"].values
)
num_correct += _num_correct
num_tied += _num_tied
num_pairs += _num_pairs
else:
df = self._predicted_partial_hazards_
num_correct, num_tied, num_pairs = _concordance_summary_statistics(
df["T"].values, -df["P"].values, df["E"].values
)
self._concordance_score_ = _concordance_ratio(num_correct, num_tied, num_pairs)
return self._concordance_score_
return self._concordance_score_
'matplotlib.pyplot.gca', 'plt.gca', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((1716, 84, 1716, 107), 'numpy.argsort', 'np.argsort', ({(1716, 95, 1716, 106): 'log_hazards'}, {}), '(log_hazards)', True, 'import numpy as np\n'), ((1719, 30, 1719, 49), 'numpy.exp', 'np.exp', ({(1719, 37, 1719, 48): 'log_hazards'}, {}), '(log_hazards)', True, 'import numpy as np\n'), ((1820, 16, 1820, 52), 'pandas.concat', 'pd.concat', ({(1820, 26, 1820, 51): '[x_bar] * values.shape[0]'}, {}), '([x_bar] * values.shape[0])', True, 'import pandas as pd\n'), ((2097, 39, 2097, 91), 'lifelines.utils.concordance._concordance_ratio', '_concordance_ratio', ({(2097, 58, 2097, 69): 'num_correct', (2097, 71, 2097, 79): 'num_tied', (2097, 81, 2097, 90): 'num_pairs'}, {}), '(num_correct, num_tied, num_pairs)', False, 'from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio\n'), ((304, 33, 304, 52), 'numpy.linalg.inv', 'inv', ({(304, 37, 304, 51): 'self._hessian_'}, {}), '(self._hessian_)', False, 'from numpy.linalg import norm, inv\n'), ((342, 27, 342, 52), 'numpy.ones', 'np.ones', ({(342, 35, 342, 51): 'self._n_examples'}, {}), '(self._n_examples)', True, 'import numpy as np\n'), ((347, 27, 347, 55), 'numpy.ones', 'np.ones', ({(347, 35, 347, 54): '(self._n_examples,)'}, {}), '((self._n_examples,))', True, 'import numpy as np\n'), ((372, 16, 378, 17), 'warnings.warn', 'warnings.warn', ({(373, 20, 376, 3): '"""It appears your weights are not integers, possibly propensity or sampling scores then?\nIt\'s important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to\nestimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\\"\n"""', (377, 20, 377, 38): 'StatisticalWarning'}, {}), '(\n """It appears your weights are not integers, possibly propensity or sampling scores then?\nIt\'s important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to\nestimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis\\"\n"""\n , StatisticalWarning)', False, 'import warnings\n'), ((461, 20, 461, 39), 'numpy.zeros_like', 'np.zeros_like', ({(461, 34, 461, 38): 'beta'}, {}), '(beta)', True, 'import numpy as np\n'), ((462, 20, 462, 60), 'numpy.zeros', 'np.zeros', ({(462, 29, 462, 59): '(beta.shape[0], beta.shape[0])'}, {}), '((beta.shape[0], beta.shape[0]))', True, 'import numpy as np\n'), ((469, 26, 469, 43), 'numpy.all', 'np.all', ({(469, 33, 469, 42): '(beta == 0)'}, {}), '(beta == 0)', True, 'import numpy as np\n'), ((482, 32, 482, 82), 'scipy.linalg.solve', 'spsolve', (), '', True, 'from scipy.linalg import solve as spsolve, LinAlgError\n'), ((504, 22, 504, 37), 'numpy.isnan', 'np.isnan', ({(504, 31, 504, 36): 'delta'}, {}), '(delta)', True, 'import numpy as np\n'), ((505, 22, 509, 17), 'lifelines.utils.ConvergenceError', 'ConvergenceError', ({(506, 20, 508, 3): '"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""'}, {}), '(\n """delta contains nan value(s). Convergence halted. 
Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""\n )', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((569, 12, 571, 13), 'warnings.warn', 'warnings.warn', ({(570, 16, 570, 92): "('Newton-Rhaphson failed to converge sufficiently in %d steps.\\n' % max_steps)", (570, 94, 570, 112): 'ConvergenceWarning'}, {}), "(\n 'Newton-Rhaphson failed to converge sufficiently in %d steps.\\n' %\n max_steps, ConvergenceWarning)", False, 'import warnings\n'), ((627, 34, 627, 49), 'numpy.dot', 'np.dot', ({(627, 41, 627, 42): 'X', (627, 44, 627, 48): 'beta'}, {}), '(X, beta)', True, 'import numpy as np\n'), ((739, 25, 739, 41), 'numpy.log', 'np.log', ({(739, 32, 739, 40): 'risk_phi'}, {}), '(risk_phi)', True, 'import numpy as np\n'), ((790, 25, 790, 41), 'numpy.log', 'np.log', ({(790, 32, 790, 40): 'risk_phi'}, {}), '(risk_phi)', True, 'import numpy as np\n'), ((829, 34, 829, 49), 'numpy.dot', 'np.dot', ({(829, 41, 829, 42): 'X', (829, 44, 829, 48): 'beta'}, {}), '(X, beta)', True, 'import numpy as np\n'), ((878, 30, 878, 65), 'numpy.dot', 'np.dot', ({(878, 37, 878, 48): 'xi_deaths.T', (878, 50, 878, 64): 'phi_x_i_deaths'}, {}), '(xi_deaths.T, phi_x_i_deaths)', True, 'import numpy as np\n'), ((934, 32, 934, 104), 'numpy.append', 'np.append', ({(934, 42, 934, 55): 'baseline_at_T', (934, 57, 934, 103): 'self.baseline_cumulative_hazard_[name].loc[T_]'}, {}), '(baseline_at_T, self.baseline_cumulative_hazard_[name].loc[T_])', True, 'import numpy as np\n'), ((995, 39, 995, 110), 'numpy.append', 'np.append', (), '', True, 'import numpy as np\n'), ((1026, 34, 1026, 57), 'numpy.dot', 'np.dot', ({(1026, 41, 1026, 42): 'X', (1026, 44, 1026, 56): 'self.params_'}, {}), '(X, self.params_)', True, 'import numpy as np\n'), ((1080, 39, 1080, 105), 'numpy.append', 'np.append', (), '', True, 'import numpy as np\n'), ((1099, 57, 1099, 95), 'numpy.tile', 'np.tile', ({(1099, 65, 1099, 86): 'self._norm_std.values', (1099, 88, 1099, 94): '(d, 1)'}, {}), '(self._norm_std.values, (d, 1))', True, 'import numpy as np\n'), ((1116, 34, 1116, 95), 'numpy.append', 'np.append', (), '', True, 'import numpy as np\n'), ((1240, 68, 1240, 102), 'numpy.exp', 'np.exp', ({(1240, 75, 1240, 101): '(-z * self.standard_errors_)'}, {}), '(-z * self.standard_errors_)', True, 'import numpy as np\n'), ((1241, 68, 1241, 101), 'numpy.exp', 'np.exp', ({(1241, 75, 1241, 100): '(z * self.standard_errors_)'}, {}), '(z * self.standard_errors_)', True, 'import numpy as np\n'), ((1244, 30, 1244, 46), 'numpy.log2', 'np.log2', ({(1244, 38, 1244, 45): "df['p']"}, {}), "(df['p'])", True, 'import numpy as np\n'), ((1440, 22, 1440, 46), 'lifelines.utils._get_index', '_get_index', ({(1440, 33, 1440, 45): 'stratified_X'}, {}), '(stratified_X)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, 
string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1442, 25, 1442, 80), 'lifelines.utils.coalesce', 'coalesce', ({(1442, 34, 1442, 39): 'times', (1442, 41, 1442, 79): 'self.baseline_cumulative_hazard_.index'}, {}), '(times, self.baseline_cumulative_hazard_.index)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1470, 22, 1470, 98), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', ({(1470, 43, 1470, 75): 'self.baseline_cumulative_hazard_', (1470, 77, 1470, 97): 'times_to_evaluate_at'}, {}), '(self.baseline_cumulative_hazard_, times_to_evaluate_at)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1471, 40, 1471, 113), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', ({(1471, 61, 1471, 93): 'self.baseline_cumulative_hazard_', (1471, 95, 1471, 112): 'conditional_after'}, {}), '(self.baseline_cumulative_hazard_, conditional_after)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1472, 22, 1472, 73), 'numpy.clip', 'np.clip', ({(1472, 30, 1472, 61): '(c_0 - c_0_conditional_after).T', (1472, 63, 1472, 64): '0', (1472, 66, 1472, 72): 'np.inf'}, {}), '((c_0 - c_0_conditional_after).T, 0, np.inf)', True, 'import numpy as np\n'), ((1475, 39, 1475, 62), 'numpy.tile', 'np.tile', ({(1475, 47, 1475, 53): 'times_', (1475, 55, 1475, 61): '(n, 1)'}, {}), '(times_, (n, 1))', True, 'import numpy as np\n'), ((1822, 30, 1822, 50), 'numpy.eye', 'np.eye', ({(1822, 37, 1822, 49): 'n_covariates'}, {}), '(n_covariates)', True, 'import numpy as np\n'), ((1842, 20, 1842, 56), 'pandas.concat', 'pd.concat', ({(1842, 30, 1842, 55): '[x_bar] * values.shape[0]'}, {}), '([x_bar] * values.shape[0])', True, 'import pandas as pd\n'), ((1855, 16, 1855, 28), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((1934, 15, 1934, 52), 'numpy.round', 'np.round', ({(1934, 24, 1934, 48): 'minumum_observed_p_value', (1934, 50, 1934, 51): '(2)'}, {}), '(minumum_observed_p_value, 2)', True, 'import numpy as np\n'), ((2013, 22, 2013, 34), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((2043, 16, 2043, 34), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((2044, 16, 2044, 45), 'matplotlib.pyplot.subplots_adjust', 
'plt.subplots_adjust', (), '', True, 'from matplotlib import pyplot as plt\n'), ((2048, 16, 2057, 17), 'textwrap.dedent', 'dedent', ({(2049, 20, 2056, 15): '"""\n ---\n [A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html\n [B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it\n [C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates\n [D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form\n [E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification\n """'}, {}), '(\n """\n ---\n [A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html\n [B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it\n [C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates\n [D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form\n [E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification\n """\n )', False, 'from textwrap import dedent, fill\n'), ((2093, 51, 2095, 17), 'lifelines.utils.concordance._concordance_summary_statistics', '_concordance_summary_statistics', ({(2094, 20, 2094, 34): "df['T'].values", (2094, 36, 2094, 51): "-df['P'].values", (2094, 53, 2094, 67): "df['E'].values"}, {}), "(df['T'].values, -df['P'].values, df['E'].values\n )", False, 'from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio\n'), ((272, 36, 272, 53), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime\n'), ((495, 22, 500, 17), 'lifelines.utils.ConvergenceError', 'ConvergenceError', ({(496, 20, 498, 3): '"""Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""', (499, 20, 499, 21): 'e'}, {}), '(\n """Convergence halted due to matrix inversion problems. Suspicion is high collinearity. 
Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""\n , e)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((674, 40, 674, 68), 'numpy.arange', 'np.arange', ({(674, 50, 674, 67): 'tied_death_counts'}, {}), '(tied_death_counts)', True, 'import numpy as np\n'), ((676, 37, 676, 79), 'numpy.outer', 'np.outer', ({(676, 46, 676, 67): 'increasing_proportion', (676, 69, 676, 78): 'tie_phi_x'}, {}), '(increasing_proportion, tie_phi_x)', True, 'import numpy as np\n'), ((677, 21, 677, 63), 'numpy.einsum', 'np.einsum', ({(677, 31, 677, 41): '"""ab,i->ab"""', (677, 43, 677, 55): 'risk_phi_x_x', (677, 57, 677, 62): 'denom'}, {}), "('ab,i->ab', risk_phi_x_x, denom)", True, 'import numpy as np\n'), ((677, 66, 679, 17), 'numpy.einsum', 'np.einsum', ({(678, 20, 678, 30): '"""ab,i->ab"""', (678, 32, 678, 43): 'tie_phi_x_x', (678, 45, 678, 74): '(increasing_proportion * denom)'}, {}), "('ab,i->ab', tie_phi_x_x, increasing_proportion * denom)", True, 'import numpy as np\n'), ((681, 30, 681, 50), 'numpy.array', 'np.array', ({(681, 39, 681, 49): '[risk_phi]'}, {}), '([risk_phi])', True, 'import numpy as np\n'), ((690, 32, 690, 57), 'numpy.dot', 'np.dot', ({(690, 39, 690, 50): 'x_death_sum', (690, 52, 690, 56): 'beta'}, {}), '(x_death_sum, beta)', True, 'import numpy as np\n'), ((882, 37, 882, 79), 'numpy.outer', 'np.outer', ({(882, 46, 882, 67): 'increasing_proportion', (882, 69, 882, 78): 'tie_phi_x'}, {}), '(increasing_proportion, tie_phi_x)', True, 'import numpy as np\n'), ((891, 21, 891, 63), 'numpy.einsum', 'np.einsum', ({(891, 31, 891, 41): '"""ab,i->ab"""', (891, 43, 891, 55): 'risk_phi_x_x', (891, 57, 891, 62): 'denom'}, {}), "('ab,i->ab', risk_phi_x_x, denom)", True, 'import numpy as np\n'), ((891, 66, 893, 17), 'numpy.einsum', 'np.einsum', ({(892, 20, 892, 30): '"""ab,i->ab"""', (892, 32, 892, 43): 'tie_phi_x_x', (892, 45, 892, 74): '(increasing_proportion * denom)'}, {}), "('ab,i->ab', tie_phi_x_x, increasing_proportion * denom)", True, 'import numpy as np\n'), ((896, 30, 896, 50), 'numpy.array', 'np.array', ({(896, 39, 896, 49): '[risk_phi]'}, {}), '([risk_phi])', True, 'import numpy as np\n'), ((907, 32, 907, 57), 'numpy.dot', 'np.dot', ({(907, 39, 907, 50): 'x_death_sum', (907, 52, 907, 56): 'beta'}, {}), '(x_death_sum, beta)', True, 'import numpy as np\n'), ((947, 78, 947, 109), 'numpy.log', 'np.log', ({(947, 85, 947, 108): 'E.values - rmart.values'}, {}), '(E.values - rmart.values)', True, 'import numpy as np\n'), ((1423, 20, 1423, 40), 'numpy.atleast_1d', 'np.atleast_1d', ({(1423, 34, 1423, 39): 'times'}, {}), '(times)', True, 'import numpy as np\n'), ((1425, 32, 1425, 63), 'lifelines.utils._to_1d_array', '_to_1d_array', ({(1425, 45, 1425, 62): 'conditional_after'}, {}), '(conditional_after)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, 
StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1447, 27, 1447, 81), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', ({(1447, 48, 1447, 58): 'strata_c_0', (1447, 60, 1447, 80): 'times_to_evaluate_at'}, {}), '(strata_c_0, times_to_evaluate_at)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1448, 44, 1448, 95), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', ({(1448, 65, 1448, 75): 'strata_c_0', (1448, 77, 1448, 94): 'conditional_after'}, {}), '(strata_c_0, conditional_after)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1449, 27, 1449, 79), 'numpy.clip', 'np.clip', ({(1449, 35, 1449, 67): '(c_0_ - c_0_conditional_after).T', (1449, 69, 1449, 70): '0', (1449, 72, 1449, 78): 'np.inf'}, {}), '((c_0_ - c_0_conditional_after).T, 0, np.inf)', True, 'import numpy as np\n'), ((1452, 43, 1452, 67), 'numpy.tile', 'np.tile', ({(1452, 51, 1452, 57): 'times_', (1452, 59, 1452, 66): '(n_, 1)'}, {}), '(times_, (n_, 1))', True, 'import numpy as np\n'), ((1456, 20, 1456, 82), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((1468, 39, 1468, 62), 'numpy.tile', 'np.tile', ({(1468, 47, 1468, 53): 'times_', (1468, 55, 1468, 61): '(n, 1)'}, {}), '(times_, (n, 1))', True, 'import numpy as np\n'), ((1476, 22, 1476, 98), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', ({(1476, 43, 1476, 75): 'self.baseline_cumulative_hazard_', (1476, 77, 1476, 97): 'times_to_evaluate_at'}, {}), '(self.baseline_cumulative_hazard_, times_to_evaluate_at)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1625, 32, 1625, 57), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((1720, 46, 1720, 95), 'numpy.exp', 'np.exp', ({(1720, 53, 1720, 94): '(z * self.standard_errors_[columns].values)'}, {}), '(z * self.standard_errors_[columns].values)', True, 'import numpy as np\n'), ((1721, 50, 1721, 100), 'numpy.exp', 'np.exp', ({(1721, 57, 1721, 99): '(-z * self.standard_errors_[columns].values)'}, {}), '(-z * self.standard_errors_[columns].values)', True, 'import numpy as np\n'), ((1725, 21, 1725, 74), 'numpy.vstack', 'np.vstack', ({(1725, 31, 1725, 73): '[lower_errors[order], upper_errors[order]]'}, {}), '([lower_errors[order], upper_errors[order]])', True, 'import numpy as np\n'), ((1839, 39, 1839, 60), 
'lifelines.utils._to_list', '_to_list', ({(1839, 48, 1839, 59): 'self.strata'}, {}), '(self.strata)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1839, 62, 1839, 80), 'lifelines.utils._to_tuple', '_to_tuple', ({(1839, 72, 1839, 79): 'stratum'}, {}), '(stratum)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((2026, 31, 2026, 58), 'lifelines.utils.lowess.lowess', 'lowess', ({(2026, 38, 2026, 47): 'tt.values', (2026, 49, 2026, 57): 'y.values'}, {}), '(tt.values, y.values)', False, 'from lifelines.utils.lowess import lowess\n'), ((2085, 58, 2087, 21), 'lifelines.utils.concordance._concordance_summary_statistics', '_concordance_summary_statistics', ({(2086, 24, 2086, 39): "_df['T'].values", (2086, 41, 2086, 57): "-_df['P'].values", (2086, 59, 2086, 74): "_df['E'].values"}, {}), "(_df['T'].values, -_df['P'].values, _df['E']\n .values)", False, 'from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio\n'), ((330, 35, 330, 56), 'lifelines.utils._to_list', '_to_list', ({(330, 44, 330, 55): 'self.strata'}, {}), '(self.strata)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((485, 26, 490, 21), 'lifelines.utils.ConvergenceError', 'ConvergenceError', ({(486, 24, 488, 3): '"""Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""', (489, 24, 489, 25): 'e'}, {}), '(\n """Hessian or gradient contains nan or inf value(s). Convergence halted. 
Please see the following tips in the lifelines documentation:\nhttps://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model\n"""\n , e)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1445, 43, 1445, 67), 'numpy.tile', 'np.tile', ({(1445, 51, 1445, 57): 'times_', (1445, 59, 1445, 66): '(n_, 1)'}, {}), '(times_, (n_, 1))', True, 'import numpy as np\n'), ((1453, 27, 1453, 81), 'lifelines.utils.interpolate_at_times', 'interpolate_at_times', ({(1453, 48, 1453, 58): 'strata_c_0', (1453, 60, 1453, 80): 'times_to_evaluate_at'}, {}), '(strata_c_0, times_to_evaluate_at)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((1818, 45, 1818, 57), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((1836, 21, 1836, 33), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((1942, 24, 1946, 25), 'textwrap.fill', 'fill', (), '', False, 'from textwrap import dedent, fill\n'), ((1950, 24, 1953, 25), 'textwrap.fill', 'fill', (), '', False, 'from textwrap import dedent, fill\n'), ((2002, 24, 2005, 25), 'textwrap.fill', 'fill', (), '', False, 'from textwrap import dedent, fill\n'), ((2033, 35, 2033, 60), 'lifelines.utils.lowess.lowess', 'lowess', ({(2033, 42, 2033, 45): 'tt_', (2033, 47, 2033, 59): 'y.values[ix]'}, {}), '(tt_, y.values[ix])', False, 'from lifelines.utils.lowess import lowess\n'), ((690, 79, 690, 92), 'numpy.log', 'np.log', ({(690, 86, 690, 91): 'denom'}, {}), '(denom)', True, 'import numpy as np\n'), ((907, 79, 907, 92), 'numpy.log', 'np.log', ({(907, 86, 907, 91): 'denom'}, {}), '(denom)', True, 'import numpy as np\n'), ((1065, 75, 1065, 91), 'numpy.zeros', 'np.zeros', ({(1065, 84, 1065, 90): '(1, d)'}, {}), '((1, d))', True, 'import numpy as np\n'), ((1434, 24, 1438, 25), 'textwrap.dedent', 'dedent', ({(1435, 28, 1437, 61): '("""The stratum %s was not found in the original training data. For example, try\n the following on the original dataset, df: `df.groupby(%s).size()`. Expected is that %s is not present in the output."""\n % (stratum, self.strata, stratum))'}, {}), '(\n """The stratum %s was not found in the original training data. For example, try\n the following on the original dataset, df: `df.groupby(%s).size()`. 
Expected is that %s is not present in the output."""\n % (stratum, self.strata, stratum))', False, 'from textwrap import dedent, fill\n'), ((1962, 38, 1962, 55), 'lifelines.utils.format_p_value', 'format_p_value', ({(1962, 53, 1962, 54): '(4)'}, {}), '(4)', False, 'from lifelines.utils import _get_index, _to_list, _to_tuple, _to_1d_array, inv_normal_cdf, normalize, qth_survival_times, coalesce, check_for_numeric_dtypes_or_raise, check_low_var, check_complete_separation, check_nans_or_infs, StatError, ConvergenceWarning, StatisticalWarning, StepSizer, ConvergenceError, string_justify, interpolate_at_times_and_return_pandas, CensoringType, interpolate_at_times, format_p_value\n'), ((2016, 66, 2016, 84), 'lifelines.statistics.TimeTransformers', 'TimeTransformers', ({}, {}), '()', False, 'from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult\n'), ((2031, 36, 2031, 58), 'numpy.random.choice', 'np.random.choice', ({(2031, 53, 2031, 54): 'n', (2031, 56, 2031, 57): 'n'}, {}), '(n, n)', True, 'import numpy as np\n'), ((521, 71, 521, 82), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((540, 16, 544, 17), 'warnings.warn', 'warnings.warn', ({(541, 20, 542, 62): '"""The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. See https://stats.stackexchange.com/q/11109/11867 for more.\n"""', (543, 20, 543, 38): 'ConvergenceWarning'}, {}), '(\n """The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. See https://stats.stackexchange.com/q/11109/11867 for more.\n"""\n , ConvergenceWarning)', False, 'import warnings\n'), ((737, 43, 737, 71), 'numpy.arange', 'np.arange', ({(737, 53, 737, 70): 'tied_death_counts'}, {}), '(tied_death_counts)', True, 'import numpy as np\n'), ((788, 43, 788, 71), 'numpy.arange', 'np.arange', ({(788, 53, 788, 70): 'tied_death_counts'}, {}), '(tied_death_counts)', True, 'import numpy as np\n')] |
asevans48/NLPServer | nlp_server/config/test/test_config.py | 6feb1d89748165f9efea40d0777d355044c48176 | """
Test configuration loading
@author aevans
"""
import os
from nlp_server.config import load_config
def test_load_config():
"""
Test loading a configuration
"""
current_dir = os.path.curdir
test_path = os.path.sep.join([current_dir, 'data', 'test_config.json'])
cfg = load_config.load_config(test_path)
assert cfg is not None
assert cfg.use_gpu is False
| [((17, 16, 17, 75), 'os.path.sep.join', 'os.path.sep.join', ({(17, 33, 17, 74): "[current_dir, 'data', 'test_config.json']"}, {}), "([current_dir, 'data', 'test_config.json'])", False, 'import os\n'), ((18, 10, 18, 44), 'nlp_server.config.load_config.load_config', 'load_config.load_config', ({(18, 34, 18, 43): 'test_path'}, {}), '(test_path)', False, 'from nlp_server.config import load_config\n')] |
ektai/frappe3 | frappe/utils/safe_exec.py | 44aa948b4d5a0d729eacfb3dabdc9c8894ae1799 |
import os, json, inspect
import mimetypes
from html2text import html2text
from RestrictedPython import compile_restricted, safe_globals
import RestrictedPython.Guards
import frappe
import frappe.utils
import frappe.utils.data
from frappe.website.utils import (get_shade, get_toc, get_next_link)
from frappe.modules import scrub
from frappe.www.printview import get_visible_columns
import frappe.exceptions
class ServerScriptNotEnabled(frappe.PermissionError): pass
def safe_exec(script, _globals=None, _locals=None):
# script reports must be enabled via site_config.json
if not frappe.conf.server_script_enabled:
frappe.msgprint('Please Enable Server Scripts')
raise ServerScriptNotEnabled
# build globals
exec_globals = get_safe_globals()
if _globals:
exec_globals.update(_globals)
# execute script compiled by RestrictedPython
exec(compile_restricted(script), exec_globals, _locals) # pylint: disable=exec-used
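# A minimal usage sketch (illustrative only, not part of the original module; it
# assumes `server_script_enabled` is set in the site's site_config.json):
#
#     results = {}
#     safe_exec("x = 1 + 1", _locals=results)
#     # names assigned by the restricted script land in `results`, e.g. results["x"] == 2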
def get_safe_globals():
datautils = frappe._dict()
if frappe.db:
date_format = frappe.db.get_default("date_format") or "yyyy-mm-dd"
time_format = frappe.db.get_default("time_format") or "HH:mm:ss"
else:
date_format = "yyyy-mm-dd"
time_format = "HH:mm:ss"
add_module_properties(frappe.utils.data, datautils, lambda obj: hasattr(obj, "__call__"))
if "_" in getattr(frappe.local, 'form_dict', {}):
del frappe.local.form_dict["_"]
user = getattr(frappe.local, "session", None) and frappe.local.session.user or "Guest"
out = frappe._dict(
# make available limited methods of frappe
json=json,
dict=dict,
frappe=frappe._dict(
_=frappe._,
_dict=frappe._dict,
flags=frappe.flags,
format=frappe.format_value,
format_value=frappe.format_value,
date_format=date_format,
time_format=time_format,
format_date=frappe.utils.data.global_date_format,
form_dict=getattr(frappe.local, 'form_dict', {}),
get_meta=frappe.get_meta,
get_doc=frappe.get_doc,
get_cached_doc=frappe.get_cached_doc,
get_list=frappe.get_list,
get_all=frappe.get_all,
get_system_settings=frappe.get_system_settings,
utils=datautils,
get_url=frappe.utils.get_url,
render_template=frappe.render_template,
msgprint=frappe.msgprint,
user=user,
get_fullname=frappe.utils.get_fullname,
get_gravatar=frappe.utils.get_gravatar_url,
full_name=frappe.local.session.data.full_name if getattr(frappe.local, "session", None) else "Guest",
request=getattr(frappe.local, 'request', {}),
session=frappe._dict(
user=user,
csrf_token=frappe.local.session.data.csrf_token if getattr(frappe.local, "session", None) else ''
),
socketio_port=frappe.conf.socketio_port,
get_hooks=frappe.get_hooks,
),
style=frappe._dict(
border_color='#d1d8dd'
),
get_toc=get_toc,
get_next_link=get_next_link,
_=frappe._,
get_shade=get_shade,
scrub=scrub,
guess_mimetype=mimetypes.guess_type,
html2text=html2text,
dev_server=1 if os.environ.get('DEV_SERVER', False) else 0
)
add_module_properties(frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception))
if not frappe.flags.in_setup_help:
out.get_visible_columns = get_visible_columns
out.frappe.date_format = date_format
out.frappe.time_format = time_format
out.frappe.db = frappe._dict(
get_list = frappe.get_list,
get_all = frappe.get_all,
get_value = frappe.db.get_value,
set_value = frappe.db.set_value,
get_single_value = frappe.db.get_single_value,
get_default = frappe.db.get_default,
escape = frappe.db.escape,
)
if frappe.response:
out.frappe.response = frappe.response
out.update(safe_globals)
# default writer allows write access
out._write_ = _write
out._getitem_ = _getitem
# allow iterators and list comprehension
out._getiter_ = iter
out._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence
out.sorted = sorted
return out
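# Note: server scripts only see the names whitelisted in `out` above; RestrictedPython
# routes item access and write targets through the _getitem_/_write_ hooks installed here.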
def _getitem(obj, key):
# guard function for RestrictedPython
# allow any key to be accessed as long as it does not start with underscore
if isinstance(key, str) and key.startswith('_'):
raise SyntaxError('Key starts with _')
return obj[key]
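	# e.g. a server script evaluating doc["title"] is routed through this guard and
	# succeeds, while doc["_secret"] (any key starting with "_") raises SyntaxError.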
def _write(obj):
# guard function for RestrictedPython
# allow writing to any object
return obj
def add_module_properties(module, data, filter_method):
for key, obj in module.__dict__.items():
if key.startswith("_"):
# ignore
continue
if filter_method(obj):
# only allow functions
data[key] = obj | [((32, 13, 32, 27), 'frappe._dict', 'frappe._dict', ({}, {}), '()', False, 'import frappe\n'), ((20, 2, 20, 49), 'frappe.msgprint', 'frappe.msgprint', ({(20, 18, 20, 48): '"""Please Enable Server Scripts"""'}, {}), "('Please Enable Server Scripts')", False, 'import frappe\n'), ((29, 6, 29, 32), 'RestrictedPython.compile_restricted', 'compile_restricted', ({(29, 25, 29, 31): 'script'}, {}), '(script)', False, 'from RestrictedPython import compile_restricted, safe_globals\n'), ((106, 18, 114, 3), 'frappe._dict', 'frappe._dict', (), '', False, 'import frappe\n'), ((34, 16, 34, 52), 'frappe.db.get_default', 'frappe.db.get_default', ({(34, 38, 34, 51): '"""date_format"""'}, {}), "('date_format')", False, 'import frappe\n'), ((35, 16, 35, 52), 'frappe.db.get_default', 'frappe.db.get_default', ({(35, 38, 35, 51): '"""time_format"""'}, {}), "('time_format')", False, 'import frappe\n'), ((87, 8, 89, 3), 'frappe._dict', 'frappe._dict', (), '', False, 'import frappe\n'), ((97, 18, 97, 53), 'os.environ.get', 'os.environ.get', ({(97, 33, 97, 45): '"""DEV_SERVER"""', (97, 47, 97, 52): 'False'}, {}), "('DEV_SERVER', False)", False, 'import os, json, inspect\n'), ((100, 66, 100, 86), 'inspect.isclass', 'inspect.isclass', ({(100, 82, 100, 85): 'obj'}, {}), '(obj)', False, 'import os, json, inspect\n')] |
BarracudaPff/code-golf-data-pythpn | simplejson/ordered_dict.py | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | """Drop-in replacement for collections.OrderedDict by Raymond Hettinger
http://code.activestate.com/recipes/576693/
"""
from UserDict import DictMixin  # needed below: OrderedDict subclasses DictMixin (its Python 2 home is UserDict)
try:
all
except NameError:
def all(seq):
for elem in seq:
if not elem:
return False
return True
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError("expected at most 1 arguments, got %d" % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end]
self.__map = {}
dict.clear(self)
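		# Bookkeeping used by this recipe: insertion order lives in a circular doubly
		# linked list of [key, prev, next] cells; self.__end is the sentinel cell and
		# self.__map maps each key to its cell, which keeps deletion O(1).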
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError("dictionary is empty")
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return "%s()" % (self.__class__.__name__,)
return "%s(%r)" % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self) == len(other) and all(p == q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other | [] |
KRHS-GameProgramming-2015/Adlez | Water.py | 8912da1ee4b3c7b105851dbcc00579ff0c3cf33e | from HardBlock import *
class Water(HardBlock):
def __init__(self, pos=[0,0], blockSize = 25):
image = "Block/Block Images/water.png"
HardBlock.__init__(self, image, pos, blockSize)
def update(*args):
pass
| [] |
bgalbraith/minerl-haiku-baselines | baselines/bc.py | c33b14699af14c904394d9c4e30dee680a8718d6 | import dill
import haiku as hk
import jax
from jax.experimental import optix
import jax.numpy as jnp
from dataset import load_data
MINERL_ENV = 'MineRLTreechopVectorObf-v0'
PARAMS_FILENAME = 'bc_params_treechop.pkl'
class PovStack(hk.Module):
""" PovStack is a module for processing the point-of-view image data that
comes from the agent's viewport. This input is in NHWC format for a shape
of (N, 64, 64, 3).
This model is inspired from
https://github.com/minerllabs/baselines/blob/master/general/chainerrl/baselines/behavioral_cloning.py
"""
def __init__(self, name=None):
super().__init__(name=name)
conv_0 = hk.Conv2D(output_channels=32,
kernel_shape=(8, 8),
stride=4,
padding='SAME',
name='conv_0')
layer_0 = (conv_0, jax.nn.relu)
conv_1 = hk.Conv2D(output_channels=64,
kernel_shape=(4, 4),
stride=2,
padding='SAME',
name='conv_1')
layer_1 = (conv_1, jax.nn.relu)
conv_2 = hk.Conv2D(output_channels=64,
kernel_shape=(3, 3),
stride=1,
padding='SAME',
name='conv_2')
layer_2 = (conv_2, jax.nn.relu)
layer_3 = (hk.Flatten(),
hk.Linear(512, name='fc_0'), jax.nn.relu,
hk.Linear(128, name='fc_1'), jax.nn.relu)
self.layers = layer_0 + layer_1 + layer_2 + layer_3
def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x
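# Shape sketch for a (N, 64, 64, 3) input (arithmetic added here for clarity, not in
# the original source): conv_0 stride 4 'SAME' -> (N, 16, 16, 32); conv_1 stride 2 ->
# (N, 8, 8, 64); conv_2 stride 1 -> (N, 8, 8, 64); flatten -> (N, 4096); fc_0 -> (N, 512);
# fc_1 -> (N, 128).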
class VectorStack(hk.Module):
""" VectorStack is a module for processing the obfuscated "vector" data that
is included in the agent's observation. This is a densely encoded form of
the discrete information regarding the state of the agent other than the
viewport, e.g. current inventory. The input is of shape (N, 64)
"""
def __init__(self, name=None):
super().__init__(name=name)
layer_0 = (hk.Linear(32, name='fc_0'), jax.nn.relu)
self.layers = layer_0
def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x
def behavioral_cloning(batch):
""" The full forward model definition """
x_0 = PovStack(name='pov_stack')(batch[0])
x_1 = VectorStack(name='vector_stack')(batch[1])
x = jnp.concatenate((x_0, x_1), axis=1)
return jnp.tanh(hk.Linear(64)(x))
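# The 64-dim tanh output is presumably sized to the environment's obfuscated "vector"
# action space (the VectorObf envs use 64-dim action/observation vectors in roughly [-1, 1]).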
@jax.jit
def mse_loss(logits, labels):
""" Mean Squared Error loss """
return jnp.mean(jnp.power(logits - labels, 2))
def main():
net = hk.transform(behavioral_cloning)
opt = optix.adam(0.001)
@jax.jit
def loss(params, batch):
""" The loss criterion for our model """
logits = net.apply(params, None, batch)
return mse_loss(logits, batch[2])
@jax.jit
def update(opt_state, params, batch):
grads = jax.grad(loss)(params, batch)
updates, opt_state = opt.update(grads, opt_state)
params = optix.apply_updates(params, updates)
return params, opt_state
@jax.jit
def accuracy(params, batch):
""" Simply report the loss for the current batch """
logits = net.apply(params, None, batch)
return mse_loss(logits, batch[2])
train_dataset, val_dataset = load_data(MINERL_ENV,
batch_size=32, epochs=100)
rng = jax.random.PRNGKey(2020)
batch = next(train_dataset)
params = net.init(rng, batch)
opt_state = opt.init(params)
for i, batch in enumerate(train_dataset):
params, opt_state = update(opt_state, params, batch)
if i % 1000 == 0:
print(accuracy(params, val_dataset))
if i % 10000 == 0:
with open(PARAMS_FILENAME, 'wb') as fh:
dill.dump(params, fh)
with open(PARAMS_FILENAME, 'wb') as fh:
dill.dump(params, fh)
if __name__ == '__main__':
main()
| [((79, 8, 79, 43), 'jax.numpy.concatenate', 'jnp.concatenate', (), '', True, 'import jax.numpy as jnp\n'), ((90, 10, 90, 42), 'haiku.transform', 'hk.transform', ({(90, 23, 90, 41): 'behavioral_cloning'}, {}), '(behavioral_cloning)', True, 'import haiku as hk\n'), ((91, 10, 91, 27), 'jax.experimental.optix.adam', 'optix.adam', ({(91, 21, 91, 26): '0.001'}, {}), '(0.001)', False, 'from jax.experimental import optix\n'), ((112, 33, 113, 69), 'dataset.load_data', 'load_data', (), '', False, 'from dataset import load_data\n'), ((115, 10, 115, 34), 'jax.random.PRNGKey', 'jax.random.PRNGKey', ({(115, 29, 115, 33): '2020'}, {}), '(2020)', False, 'import jax\n'), ((24, 17, 28, 41), 'haiku.Conv2D', 'hk.Conv2D', (), '', True, 'import haiku as hk\n'), ((31, 17, 35, 41), 'haiku.Conv2D', 'hk.Conv2D', (), '', True, 'import haiku as hk\n'), ((38, 17, 42, 41), 'haiku.Conv2D', 'hk.Conv2D', (), '', True, 'import haiku as hk\n'), ((86, 20, 86, 49), 'jax.numpy.power', 'jnp.power', ({(86, 30, 86, 45): '(logits - labels)', (86, 47, 86, 48): '(2)'}, {}), '(logits - labels, 2)', True, 'import jax.numpy as jnp\n'), ((103, 17, 103, 53), 'jax.experimental.optix.apply_updates', 'optix.apply_updates', ({(103, 37, 103, 43): 'params', (103, 45, 103, 52): 'updates'}, {}), '(params, updates)', False, 'from jax.experimental import optix\n'), ((130, 8, 130, 29), 'dill.dump', 'dill.dump', ({(130, 18, 130, 24): 'params', (130, 26, 130, 28): 'fh'}, {}), '(params, fh)', False, 'import dill\n'), ((45, 19, 45, 31), 'haiku.Flatten', 'hk.Flatten', ({}, {}), '()', True, 'import haiku as hk\n'), ((46, 19, 46, 46), 'haiku.Linear', 'hk.Linear', (), '', True, 'import haiku as hk\n'), ((47, 19, 47, 46), 'haiku.Linear', 'hk.Linear', (), '', True, 'import haiku as hk\n'), ((65, 19, 65, 45), 'haiku.Linear', 'hk.Linear', (), '', True, 'import haiku as hk\n'), ((80, 20, 80, 33), 'haiku.Linear', 'hk.Linear', ({(80, 30, 80, 32): '(64)'}, {}), '(64)', True, 'import haiku as hk\n'), ((101, 16, 101, 30), 'jax.grad', 'jax.grad', ({(101, 25, 101, 29): 'loss'}, {}), '(loss)', False, 'import jax\n'), ((127, 16, 127, 37), 'dill.dump', 'dill.dump', ({(127, 26, 127, 32): 'params', (127, 34, 127, 36): 'fh'}, {}), '(params, fh)', False, 'import dill\n')] |
ajavadia/qiskit-sdk-py | qiskit/circuit/library/templates/__init__.py | a59e8e6be1793197e19998c1f7dcfc45e6f2f3af | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
A library of template circuits.
Templates are circuits that compute the identity. They find use
in circuit optimization where matching part of the template allows the compiler
to replace the match with the inverse of the remainder from the template.
"""
from .nct.template_nct_2a_1 import template_nct_2a_1
from .nct.template_nct_2a_2 import template_nct_2a_2
from .nct.template_nct_2a_3 import template_nct_2a_3
from .nct.template_nct_4a_1 import template_nct_4a_1
from .nct.template_nct_4a_2 import template_nct_4a_2
from .nct.template_nct_4a_3 import template_nct_4a_3
from .nct.template_nct_4b_1 import template_nct_4b_1
from .nct.template_nct_4b_2 import template_nct_4b_2
from .nct.template_nct_5a_1 import template_nct_5a_1
from .nct.template_nct_5a_2 import template_nct_5a_2
from .nct.template_nct_5a_3 import template_nct_5a_3
from .nct.template_nct_5a_4 import template_nct_5a_4
from .nct.template_nct_6a_1 import template_nct_6a_1
from .nct.template_nct_6a_2 import template_nct_6a_2
from .nct.template_nct_6a_3 import template_nct_6a_3
from .nct.template_nct_6a_4 import template_nct_6a_4
from .nct.template_nct_6b_1 import template_nct_6b_1
from .nct.template_nct_6b_2 import template_nct_6b_2
from .nct.template_nct_6c_1 import template_nct_6c_1
from .nct.template_nct_7a_1 import template_nct_7a_1
from .nct.template_nct_7b_1 import template_nct_7b_1
from .nct.template_nct_7c_1 import template_nct_7c_1
from .nct.template_nct_7d_1 import template_nct_7d_1
from .nct.template_nct_7e_1 import template_nct_7e_1
from .nct.template_nct_9a_1 import template_nct_9a_1
from .nct.template_nct_9c_1 import template_nct_9c_1
from .nct.template_nct_9c_2 import template_nct_9c_2
from .nct.template_nct_9c_3 import template_nct_9c_3
from .nct.template_nct_9c_4 import template_nct_9c_4
from .nct.template_nct_9c_5 import template_nct_9c_5
from .nct.template_nct_9c_6 import template_nct_9c_6
from .nct.template_nct_9c_7 import template_nct_9c_7
from .nct.template_nct_9c_8 import template_nct_9c_8
from .nct.template_nct_9c_9 import template_nct_9c_9
from .nct.template_nct_9c_10 import template_nct_9c_10
from .nct.template_nct_9c_11 import template_nct_9c_11
from .nct.template_nct_9c_12 import template_nct_9c_12
from .nct.template_nct_9d_1 import template_nct_9d_1
from .nct.template_nct_9d_2 import template_nct_9d_2
from .nct.template_nct_9d_3 import template_nct_9d_3
from .nct.template_nct_9d_4 import template_nct_9d_4
from .nct.template_nct_9d_5 import template_nct_9d_5
from .nct.template_nct_9d_6 import template_nct_9d_6
from .nct.template_nct_9d_7 import template_nct_9d_7
from .nct.template_nct_9d_8 import template_nct_9d_8
from .nct.template_nct_9d_9 import template_nct_9d_9
from .nct.template_nct_9d_10 import template_nct_9d_10
| [] |
btddg28/ironpython | Tests/test_ironmath.py | 8006238c19d08db5db9bada39d765143e631059e | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#
# test Microsoft.Scripting.Math
#
from iptest.assert_util import *
skiptest("win32")
from System import *
import clr
#silverlight already has this
if is_cli:
math_assembly = (1).GetType().Assembly
clr.AddReference(math_assembly)
load_iron_python_test()
import IronPythonTest
if is_net40:
from System.Numerics import BigInteger, Complex
else:
from Microsoft.Scripting.Math import BigInteger
from Microsoft.Scripting.Math import Complex64 as Complex
class myFormatProvider(IFormatProvider):
def ToString():pass
p = myFormatProvider()
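# p is a stub IFormatProvider handed to the IConvertible-style conversion overloads
# (ToBoolean, ToChar, ToDouble, ...) exercised by the tests below.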
def test_bigint():
AreEqual(BigInteger.Add(1,99999999999999999999999999999999999999999999999999999999999) ,BigInteger.Subtract(100000000000000000000000000000000000000000000000000000000001,1))
AreEqual(BigInteger.Multiply(400,500) , BigInteger.Divide(1000000,5))
AreEqual(BigInteger.Multiply(400,8) , BigInteger.LeftShift(400,3))
AreEqual(BigInteger.Divide(400,8) , BigInteger.RightShift(400,3))
AreEqual(BigInteger.RightShift(BigInteger.LeftShift(400,100),100) , 400)
AreEqual(BigInteger.RightShift(BigInteger.LeftShift(-12345678987654321,100),100) , -12345678987654321)
if is_net40:
AssertError(ValueError, BigInteger.RightShift, 400, -100)
AssertError(ValueError, BigInteger.LeftShift, 400, -100)
AssertError(ValueError, BigInteger.RightShift, -12345678987654321, -100)
AssertError(ValueError, BigInteger.LeftShift, -12345678987654321, -100)
else:
AreEqual(BigInteger.LeftShift(BigInteger.RightShift(400,-100),-100) , 400)
AreEqual(BigInteger.LeftShift(BigInteger.RightShift(-12345678987654321,-100),-100) , -12345678987654321)
AreEqual(BigInteger(-123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement().OnesComplement() , -123456781234567812345678123456781234567812345678123456781234567812345678)
AreEqual(BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement() , -(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 + 1 ))
Assert(BigInteger.Xor(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678,BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement()) , -1)
AreEqual(BigInteger.BitwiseAnd(0xff00ff00,BigInteger.BitwiseOr(0x00ff00ff,0xaabbaabb)) , BigInteger(0xaa00aa00))
AreEqual(BigInteger.Mod(BigInteger(-9999999999999999999999999999999999999999),1000000000000000000) , -BigInteger.Mod(9999999999999999999999999999999999999999,BigInteger(-1000000000000000000)))
AreEqual(BigInteger.ToInt64(0x7fffffffffffffff) , 9223372036854775807)
AssertError(OverflowError, BigInteger.ToInt64, 0x8000000000000000)
AreEqual(BigInteger(-0).ToBoolean(p) , False )
AreEqual(BigInteger(-1212321.3213).ToBoolean(p) , True )
AreEqual(BigInteger(1212321384892342394723947).ToBoolean(p) , True )
AreEqual(BigInteger(0).ToChar(p) , Char.MinValue)
AreEqual(BigInteger(65).ToChar(p) , IConvertible.ToChar('A', p))
AreEqual(BigInteger(0xffff).ToChar(p) , Char.MaxValue)
AssertError(OverflowError, BigInteger(-1).ToChar, p)
AreEqual(BigInteger(100).ToDouble(p) , 100.0)
AreEqual(BigInteger(BigInteger(100).ToDouble(p)).ToSingle(p) , BigInteger(100.1213123).ToFloat())
Assert(BigInteger(100) != 100.32)
AreEqual(BigInteger(100) , 100.0)
Assert( 100.32 != BigInteger(100))
AreEqual(100.0 , BigInteger(100) )
def test_big_1():
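    # Each tuple is (a, m, t, x): a is the exponent for the type's bounds, m the BigInteger
    # conversion method, t the target CLR type, and x selects signed (2) vs unsigned (0),
    # so -x ** a is t.MinValue and 2 ** a - 1 is t.MaxValue; the final two asserts check
    # the overflow cases one step beyond each bound.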
for (a, m, t,x) in [
(7, "ToSByte", SByte,2),
(8, "ToByte", Byte, 0),
(15, "ToInt16", Int16,2),
(16, "ToUInt16", UInt16,0),
(31, "ToInt32", Int32,2),
(32, "ToUInt32", UInt32,0),
(63, "ToInt64", Int64,2),
(64, "ToUInt64", UInt64,0)
]:
b = BigInteger(-x ** a )
left = getattr(b, m)(p)
right = t.MinValue
AreEqual(left, right)
b = BigInteger(2 ** a -1)
left = getattr(b, m)(p)
right = t.MaxValue
AreEqual(left, right)
b = BigInteger(0)
left = getattr(b, m)(p)
right = t.MaxValue - t.MaxValue
AreEqual(left, 0)
AssertError(OverflowError,getattr(BigInteger(2 ** a ), m),p)
AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m),p)
def test_big_2():
for (a, m, t,x) in [
(31, "ToInt32",Int32,2),
(32, "ToUInt32",UInt32,0),
(63, "ToInt64",Int64,2),
(64, "ToUInt64",UInt64,0)
]:
b = BigInteger(-x ** a )
left = getattr(b, m)()
right = t.MinValue
AreEqual(left, right)
b = BigInteger(2 ** a -1)
left = getattr(b, m)()
right = t.MaxValue
AreEqual(left, right)
b = BigInteger(0)
left = getattr(b, m)()
right = t.MaxValue - t.MaxValue
AreEqual(left, right)
AssertError(OverflowError,getattr(BigInteger(2 ** a ), m))
AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m))
#complex
def test_complex():
AreEqual(
Complex.Add(
Complex(BigInteger(9999), -1234),
Complex.Conjugate(Complex(9999, -1234)) ),
Complex.Multiply(BigInteger(9999), 2) )
AreEqual(
Complex.Add(
Complex(99999.99e-200, 12345.88e+100),
Complex.Negate(Complex(99999.99e-200, 12345.88e+100)) ),
Complex.Subtract(
Complex(99999.99e-200, 12345.88e+100),
Complex(99999.99e-200, 12345.88e+100) ))
AreEqual(
Complex.Divide(4+2j,2),
(2 + 1j) )
Assert(not hasattr(Complex, "Mod")) #IP 1.x had limited support for modulo which has been removed
def test_bool_misc():
if is_net40:
def is_zero(bigint):
return bigint.IsZero
else:
def is_zero(bigint):
return bigint.IsZero()
AreEqual(BigInteger(-1234).Sign, -1)
AreEqual(is_zero(BigInteger(-1234)), False)
AreEqual(BigInteger(-1234).IsNegative(), True)
AreEqual(BigInteger(-1234).IsPositive(), False)
AreEqual(BigInteger(0).Sign, 0)
AreEqual(is_zero(BigInteger(0)), True)
AreEqual(BigInteger(0).IsNegative(), False)
AreEqual(BigInteger(0).IsPositive(), False)
AreEqual(BigInteger(1234).Sign, 1)
AreEqual(is_zero(BigInteger(1234)), False)
AreEqual(BigInteger(1234).IsNegative(), False)
AreEqual(BigInteger(1234).IsPositive(), True)
def test_byte_conversions():
def CheckByteConversions(bigint, bytes):
SequencesAreEqual(bigint.ToByteArray(), bytes)
AreEqual(BigInteger.Create(Array[Byte](bytes)), bigint)
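    # The expected byte arrays are little-endian two's complement; a trailing 0x00 or 0xff
    # byte is kept where needed to preserve the sign (e.g. 0xff -> [0xff, 0x00]).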
CheckByteConversions(BigInteger(0x00), [0x00])
CheckByteConversions(BigInteger(-0x01), [0xff])
CheckByteConversions(BigInteger(-0x81), [0x7f, 0xff])
CheckByteConversions(BigInteger(-0x100), [0x00, 0xff])
CheckByteConversions(BigInteger(-0x1000), [0x00, 0xf0])
CheckByteConversions(BigInteger(-0x10000), [0x00, 0x00, 0xff])
CheckByteConversions(BigInteger(-0x100000), [0x00, 0x00, 0xf0])
CheckByteConversions(BigInteger(-0x10000000), [0x00, 0x00, 0x00, 0xf0])
CheckByteConversions(BigInteger(-0x100000000), [0x00, 0x00, 0x00, 0x00, 0xff])
CheckByteConversions(BigInteger(0x7f), [0x7f])
CheckByteConversions(BigInteger(0xff), [0xff, 0x00])
CheckByteConversions(BigInteger(0x0201), [0x01, 0x02])
CheckByteConversions(BigInteger(0xf2f1), [0xf1, 0xf2, 0x00])
CheckByteConversions(BigInteger(0x03020100), [0x00, 0x01, 0x02, 0x03])
CheckByteConversions(BigInteger(0x0403020100), [0x00, 0x01, 0x02, 0x03, 0x04])
CheckByteConversions(BigInteger(0x0706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07])
CheckByteConversions(BigInteger(0x080706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08])
def test_dword_conversions():
def CheckDwordConversions(bigint, dwords):
SequencesAreEqual(bigint.GetWords(), dwords)
if bigint == BigInteger.Zero:
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
0,
Array[UInt32](dwords),),
bigint)
else:
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
1,
Array[UInt32](dwords)),
bigint)
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
-1,
Array[UInt32](dwords)),
BigInteger.Negate(bigint))
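    # CreateBigInteger(sign, dwords) builds the value from little-endian 32-bit words;
    # the test passes sign 0 for zero and +/-1 otherwise.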
CheckDwordConversions(BigInteger(0), [0x00000000])
CheckDwordConversions(BigInteger(1), [0x00000001])
CheckDwordConversions(BigInteger((1<<31)), [0x80000000])
CheckDwordConversions(BigInteger(((1<<31) + 9)), [0x80000009])
CheckDwordConversions(BigInteger((1<<32)), [0x00000000, 0x00000001])
def test_misc():
AssertError(ArgumentException, IronPythonTest.System_Scripting_Math.CreateBigInteger, 0, (1, 2, 3))
AssertError(ArgumentNullException, IronPythonTest.System_Scripting_Math.CreateBigInteger, 0, None)
AreEqual(BigInteger(1).CompareTo(None), 1)
if is_net40:
AreEqual(BigInteger(1).CompareTo(True), 0)
else:
AssertError(ArgumentException, BigInteger(1).CompareTo, True)
run_test(__name__)
| [((30, 4, 30, 35), 'clr.AddReference', 'clr.AddReference', ({(30, 21, 30, 34): 'math_assembly'}, {}), '(math_assembly)', False, 'import clr\n'), ((48, 13, 48, 90), 'Microsoft.Scripting.Math.BigInteger.Add', 'BigInteger.Add', ({(48, 28, 48, 29): '(1)', (48, 30, 48, 89): '(99999999999999999999999999999999999999999999999999999999999)'}, {}), '(1, 99999999999999999999999999999999999999999999999999999999999)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((48, 92, 48, 175), 'Microsoft.Scripting.Math.BigInteger.Subtract', 'BigInteger.Subtract', ({(48, 112, 48, 172): '(100000000000000000000000000000000000000000000000000000000001)', (48, 173, 48, 174): '(1)'}, {}), '(\n 100000000000000000000000000000000000000000000000000000000001, 1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((49, 13, 49, 41), 'Microsoft.Scripting.Math.BigInteger.Multiply', 'BigInteger.Multiply', ({(49, 33, 49, 36): '(400)', (49, 37, 49, 40): '(500)'}, {}), '(400, 500)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((49, 44, 49, 72), 'Microsoft.Scripting.Math.BigInteger.Divide', 'BigInteger.Divide', ({(49, 62, 49, 69): '(1000000)', (49, 70, 49, 71): '(5)'}, {}), '(1000000, 5)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((50, 13, 50, 39), 'Microsoft.Scripting.Math.BigInteger.Multiply', 'BigInteger.Multiply', ({(50, 33, 50, 36): '(400)', (50, 37, 50, 38): '(8)'}, {}), '(400, 8)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((50, 42, 50, 69), 'Microsoft.Scripting.Math.BigInteger.LeftShift', 'BigInteger.LeftShift', ({(50, 63, 50, 66): '(400)', (50, 67, 50, 68): '(3)'}, {}), '(400, 3)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((51, 13, 51, 37), 'Microsoft.Scripting.Math.BigInteger.Divide', 'BigInteger.Divide', ({(51, 31, 51, 34): '(400)', (51, 35, 51, 36): '(8)'}, {}), '(400, 8)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((51, 40, 51, 68), 'Microsoft.Scripting.Math.BigInteger.RightShift', 'BigInteger.RightShift', ({(51, 62, 51, 65): '(400)', (51, 66, 51, 67): '(3)'}, {}), '(400, 3)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((65, 93, 65, 115), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(65, 104, 65, 114): '(2852170240)'}, {}), '(2852170240)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((68, 13, 68, 51), 'Microsoft.Scripting.Math.BigInteger.ToInt64', 'BigInteger.ToInt64', ({(68, 32, 68, 50): '(9223372036854775807)'}, {}), '(9223372036854775807)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((84, 13, 84, 28), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(84, 24, 84, 27): '(100)'}, {}), '(100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((87, 21, 87, 36), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(87, 32, 87, 35): '(100)'}, {}), '(100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((101, 12, 101, 32), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(101, 23, 101, 30): '-x ** a'}, {}), '(-x ** a)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((106, 12, 106, 33), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(106, 23, 106, 32): '2 ** a - 1'}, {}), '(2 ** a - 1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((111, 12, 111, 25), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(111, 23, 111, 24): '0'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((128, 12, 128, 32), 'Microsoft.Scripting.Math.BigInteger', 
'BigInteger', ({(128, 23, 128, 30): '-x ** a'}, {}), '(-x ** a)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((133, 12, 133, 33), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(133, 23, 133, 32): '2 ** a - 1'}, {}), '(2 ** a - 1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((138, 12, 138, 25), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(138, 23, 138, 24): '0'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((162, 8, 162, 30), 'Microsoft.Scripting.Math.Complex64.Divide', 'Complex.Divide', ({(162, 23, 162, 27): '(4 + 2.0j)', (162, 28, 162, 29): '(2)'}, {}), '(4 + 2.0j, 2)', True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((196, 25, 196, 41), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(196, 36, 196, 40): '(0)'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((198, 25, 198, 42), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(198, 36, 198, 41): '(-1)'}, {}), '(-1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((199, 25, 199, 42), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(199, 36, 199, 41): '(-129)'}, {}), '(-129)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((200, 25, 200, 43), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(200, 36, 200, 42): '(-256)'}, {}), '(-256)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((201, 25, 201, 44), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(201, 36, 201, 43): '(-4096)'}, {}), '(-4096)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((202, 25, 202, 45), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(202, 36, 202, 44): '(-65536)'}, {}), '(-65536)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((203, 25, 203, 46), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(203, 36, 203, 45): '(-1048576)'}, {}), '(-1048576)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((204, 25, 204, 48), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(204, 36, 204, 47): '(-268435456)'}, {}), '(-268435456)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((205, 25, 205, 49), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(205, 36, 205, 48): '(-4294967296)'}, {}), '(-4294967296)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((207, 25, 207, 41), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(207, 36, 207, 40): '(127)'}, {}), '(127)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((208, 25, 208, 41), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(208, 36, 208, 40): '(255)'}, {}), '(255)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((209, 25, 209, 43), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(209, 36, 209, 42): '(513)'}, {}), '(513)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((210, 25, 210, 43), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(210, 36, 210, 42): '(62193)'}, {}), '(62193)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((211, 25, 211, 47), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(211, 36, 211, 46): '(50462976)'}, {}), '(50462976)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((212, 25, 212, 49), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(212, 36, 212, 48): '(17230332160)'}, {}), '(17230332160)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((213, 25, 213, 55), 
'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(213, 36, 213, 54): '(506097522914230528)'}, {}), '(506097522914230528)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((214, 25, 214, 57), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(214, 36, 214, 56): '(148080050112590643456)'}, {}), '(148080050112590643456)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((237, 26, 237, 39), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(237, 37, 237, 38): '(0)'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((238, 26, 238, 39), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(238, 37, 238, 38): '(1)'}, {}), '(1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((239, 26, 239, 45), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(239, 38, 239, 43): '(1 << 31)'}, {}), '(1 << 31)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((240, 26, 240, 51), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(240, 38, 240, 49): '((1 << 31) + 9)'}, {}), '((1 << 31) + 9)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((241, 26, 241, 45), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(241, 38, 241, 43): '(1 << 32)'}, {}), '(1 << 32)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((52, 35, 52, 64), 'Microsoft.Scripting.Math.BigInteger.LeftShift', 'BigInteger.LeftShift', ({(52, 56, 52, 59): '(400)', (52, 60, 52, 63): '(100)'}, {}), '(400, 100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((53, 35, 53, 79), 'Microsoft.Scripting.Math.BigInteger.LeftShift', 'BigInteger.LeftShift', ({(53, 56, 53, 74): '(-12345678987654321)', (53, 75, 53, 78): '(100)'}, {}), '(-12345678987654321, 100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((65, 46, 65, 89), 'Microsoft.Scripting.Math.BigInteger.BitwiseOr', 'BigInteger.BitwiseOr', ({(65, 67, 65, 77): '(16711935)', (65, 78, 65, 88): '(2864425659)'}, {}), '(16711935, 2864425659)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((66, 28, 66, 81), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(66, 39, 66, 80): '(-9999999999999999999999999999999999999999)'}, {}), '(-9999999999999999999999999999999999999999)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((78, 31, 78, 45), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(78, 42, 78, 44): '(-1)'}, {}), '(-1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((83, 11, 83, 26), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(83, 22, 83, 25): '(100)'}, {}), '(100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((86, 22, 86, 37), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(86, 33, 86, 36): '(100)'}, {}), '(100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((153, 25, 153, 41), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(153, 36, 153, 40): '(9999)'}, {}), '(9999)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((156, 12, 156, 49), 'Microsoft.Scripting.Math.Complex64', 'Complex', ({(156, 20, 156, 33): '(9.999999e-196)', (156, 35, 156, 48): '(1.234588e+104)'}, {}), '(9.999999e-196, 1.234588e+104)', True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((159, 12, 159, 49), 'Microsoft.Scripting.Math.Complex64', 'Complex', ({(159, 20, 159, 33): '(9.999999e-196)', (159, 35, 159, 48): '(1.234588e+104)'}, {}), '(9.999999e-196, 1.234588e+104)', True, 'from Microsoft.Scripting.Math import Complex64 as 
Complex\n'), ((160, 12, 160, 49), 'Microsoft.Scripting.Math.Complex64', 'Complex', ({(160, 20, 160, 33): '(9.999999e-196)', (160, 35, 160, 48): '(1.234588e+104)'}, {}), '(9.999999e-196, 1.234588e+104)', True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((174, 13, 174, 30), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(174, 24, 174, 29): '(-1234)'}, {}), '(-1234)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((175, 21, 175, 38), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(175, 32, 175, 37): '(-1234)'}, {}), '(-1234)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((179, 13, 179, 26), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(179, 24, 179, 25): '(0)'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((180, 21, 180, 34), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(180, 32, 180, 33): '(0)'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((184, 13, 184, 29), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(184, 24, 184, 28): '(1234)'}, {}), '(1234)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((185, 21, 185, 37), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(185, 32, 185, 36): '(1234)'}, {}), '(1234)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((60, 38, 60, 69), 'Microsoft.Scripting.Math.BigInteger.RightShift', 'BigInteger.RightShift', ({(60, 60, 60, 63): '(400)', (60, 64, 60, 68): '(-100)'}, {}), '(400, -100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((61, 38, 61, 84), 'Microsoft.Scripting.Math.BigInteger.RightShift', 'BigInteger.RightShift', ({(61, 60, 61, 78): '(-12345678987654321)', (61, 79, 61, 83): '(-100)'}, {}), '(-12345678987654321, -100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((63, 13, 63, 114), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(63, 24, 63, 113): '(-\n 1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678\n )'}, {}), '(-\n 1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678\n )', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((66, 162, 66, 194), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(66, 173, 66, 193): '(-1000000000000000000)'}, {}), '(-1000000000000000000)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((71, 13, 71, 27), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(71, 24, 71, 26): '(-0)'}, {}), '(-0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((72, 13, 72, 38), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(72, 24, 72, 37): '(-1212321.3213)'}, {}), '(-1212321.3213)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((73, 13, 73, 50), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(73, 24, 73, 49): '(1212321384892342394723947)'}, {}), '(1212321384892342394723947)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((75, 13, 75, 26), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(75, 24, 75, 25): '(0)'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((76, 13, 76, 27), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(76, 24, 76, 26): '(65)'}, {}), '(65)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((77, 13, 77, 31), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(77, 24, 77, 30): '(65535)'}, {}), '(65535)', False, 'from Microsoft.Scripting.Math import 
BigInteger\n'), ((80, 13, 80, 28), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(80, 24, 80, 27): '(100)'}, {}), '(100)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((81, 67, 81, 90), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(81, 78, 81, 89): '(100.1213123)'}, {}), '(100.1213123)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((116, 42, 116, 61), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(116, 53, 116, 59): '(2 ** a)'}, {}), '(2 ** a)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((117, 42, 117, 66), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(117, 53, 117, 64): '(-1 - x ** a)'}, {}), '(-1 - x ** a)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((143, 42, 143, 61), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(143, 53, 143, 59): '(2 ** a)'}, {}), '(2 ** a)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((144, 42, 144, 66), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(144, 53, 144, 64): '(-1 - x ** a)'}, {}), '(-1 - x ** a)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((151, 20, 151, 36), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(151, 31, 151, 35): '(9999)'}, {}), '(9999)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((152, 30, 152, 50), 'Microsoft.Scripting.Math.Complex64', 'Complex', ({(152, 38, 152, 42): '(9999)', (152, 44, 152, 49): '(-1234)'}, {}), '(9999, -1234)', True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((157, 27, 157, 64), 'Microsoft.Scripting.Math.Complex64', 'Complex', ({(157, 35, 157, 48): '(9.999999e-196)', (157, 50, 157, 63): '(1.234588e+104)'}, {}), '(9.999999e-196, 1.234588e+104)', True, 'from Microsoft.Scripting.Math import Complex64 as Complex\n'), ((176, 13, 176, 30), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(176, 24, 176, 29): '(-1234)'}, {}), '(-1234)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((177, 13, 177, 30), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(177, 24, 177, 29): '(-1234)'}, {}), '(-1234)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((181, 13, 181, 26), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(181, 24, 181, 25): '(0)'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((182, 13, 182, 26), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(182, 24, 182, 25): '(0)'}, {}), '(0)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((186, 13, 186, 29), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(186, 24, 186, 28): '(1234)'}, {}), '(1234)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((187, 13, 187, 29), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(187, 24, 187, 28): '(1234)'}, {}), '(1234)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((235, 16, 235, 41), 'Microsoft.Scripting.Math.BigInteger.Negate', 'BigInteger.Negate', ({(235, 34, 235, 40): 'bigint'}, {}), '(bigint)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((247, 13, 247, 26), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(247, 24, 247, 25): '(1)'}, {}), '(1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((251, 39, 251, 52), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(251, 50, 251, 51): '(1)'}, {}), '(1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((64, 116, 64, 217), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(64, 127, 
64, 216): '(-\n 1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678\n )'}, {}), '(-\n 1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678\n )', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((249, 17, 249, 30), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(249, 28, 249, 29): '(1)'}, {}), '(1)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((62, 13, 62, 98), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(62, 24, 62, 97): '(-123456781234567812345678123456781234567812345678123456781234567812345678)'}, {}), '(-\n 123456781234567812345678123456781234567812345678123456781234567812345678)', False, 'from Microsoft.Scripting.Math import BigInteger\n'), ((81, 24, 81, 39), 'Microsoft.Scripting.Math.BigInteger', 'BigInteger', ({(81, 35, 81, 38): '(100)'}, {}), '(100)', False, 'from Microsoft.Scripting.Math import BigInteger\n')] |
timtyree/bgmc | python/lib/viewer/gener_q_vs_w_for_df.py | 891e003a9594be9e40c53822879421c2b8c44eed | import matplotlib.pyplot as plt, numpy as np, pandas as pd,os
from ..model import recall_powerlaw_fits_to_full_models
from .. import compute_power_rmse
from .bluf import *
from ..measure.powerlaw import *
from .gener_q_vs_w_for_result_folder import *
def q_vs_w_plotter_function_from_df(ax,df):
# npartitions=os.cpu_count()
fontsize=16
printing=False
alpha=0.5
markersize=50#5
xlabel=r'q (cm$^{-2}$)'
ylabel=r'w (Hz cm$^{-2}$)'
c='C3'
xlim=[.1,1.05]
ylim=[0.,20]
# xlim=[-0.05,1.05]
# ylim=[1e-1,20]#[1e-5,1e4]
legend_fontsize=fontsize-6
title_fontsize=fontsize-8
x_values=df.q.values
y_values=df.w.values
#extract column values
r_values=np.array(sorted(set(df.r.values)))#cm
D_values=np.array(sorted(set(df.D.values)))#cm^2/s
L_values=np.array(sorted(set(df.L.values)))#cm
A_values=L_values**2#cm^2
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
varkappa_values=np.array(sorted(set(df.varkappa.values)))#1/s
x0_values=np.array(sorted(set(df.x0.values)))#1/s
set_second_values=np.array(sorted(set(df.set_second.values)))
reflect_values=np.array(sorted(set(df.reflect.values)))
no_repulsion_values=np.array(sorted(set(df.no_repulsion.values)))
no_attraction_values=np.array(sorted(set(df.no_attraction.values)))
neighbor_values=np.array(sorted(set(df.neighbor.values)))
force_code_values=np.array(sorted(set(df.force_code.values)))
if printing:
print(f"input parameters:")
print(f"r~{r_values}")
print(f"D~{D_values}")
print(f"L~{L_values}")
print(f"kappa~{kappa_values}")
print(f"a~{varkappa_values}")
print(f"x0~{x0_values}")
print(f"set_second~{set_second_values}")
print(f"reflect~{reflect_values}")
print(f"no_repulsion~{no_repulsion_values}")
print(f"no_attraction~{no_attraction_values}")
print(f"neighbor~{neighbor_values}")
print(f"force_code~{force_code_values}")
    # TODO: compute x/y values
#compute title=
# title=r"$\nu$="+f"{m:.3f}, "+f"M={M:.3f}"+r" cm$^2$/s\n"
    # additional parameters are optional and can be uncommented as needed...
title=f"force_code={int(force_code_values[0])}, neighbors={int(neighbor_values[0])}, reflect={int(reflect_values[0])}\n"
title=title+r'$r=$'+f'{r_values[0]:.5f} cm, '
title=title+r'$\kappa=$'+f'{kappa_values[0]:.5f} Hz\n'
title=title+r'$D=$'+f'{D_values[0]:.5f} cm'+r'$^2$/s, '
title=title+r'$a=$'+f'{varkappa_values[0]:.5f} cm'+r'$^2$/s, '
title=title+r'$x_0=$'+f'{x0_values[0]:.0f} cm\n'
#DONE: plot the data
PlotFullModels(ax,xlim=[0.1,1])
FormatAxes(ax,xlim,ylim,xlabel,ylabel,title,fontsize=fontsize,use_loglog=False)#,**kwargs)
PlotTrial(ax, x_values,y_values,title,title_fontsize)
ax.legend(fontsize=legend_fontsize,ncol=1,loc='upper left')
return True
def q_vs_Delta_w_plotter_function_from_df(ax,df):
fontsize=16
use_Delta_thresh=True
use_error_bars=True
percent_uncertainty=1.
printing=False
alpha=0.5
markersize=50#5
xlabel=r'q (cm$^{-2}$)'
ylabel=r'w (Hz cm$^{-2}$)'
c='C3'
xlim=[.1,1.05]
ylim=[-1,1]
legend_fontsize=fontsize-6
title_fontsize=fontsize-8
x_values=df.q.values
y_values=df.w.values
if use_error_bars:
yerr_values=percent_uncertainty/100*y_values
#compute the error
model_name,m,M=compute_nearest_powerlaw_fit(x_values,y_values)
yhat_values=M*x_values**m
Delta_y_values=y_values-yhat_values
y_values=Delta_y_values
# TODO: compute rmse between
# the particle model and the full model
rmse_particle_vs_full=np.sqrt(np.mean(Delta_y_values**2))
Delta_thresh=rmse_particle_vs_full
#TODO: compute the apparent powerlaw fit of the particle model
x_values=df.q.values
y_values=df.w.values
B,Delta_B,m,Delta_m,Rsq=fit_power_law(x_values,y_values)
rmse_particle_vs_powerlawfit=compute_power_rmse(x_values,y_values, m, B)
M, Delta_M= comp_power_scale(B,Delta_B,m,Delta_m)
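    # NOTE: yhat_values still holds the nearest full-model prediction computed above, so the
    # residuals plotted below are relative to the full model, not to this power-law fit.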
Delta_y_values=y_values-yhat_values
y_values=Delta_y_values
#extract column values
r_values=np.array(sorted(set(df.r.values)))#cm
D_values=np.array(sorted(set(df.D.values)))#cm^2/s
L_values=np.array(sorted(set(df.L.values)))#cm
A_values=L_values**2#cm^2
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
varkappa_values=np.array(sorted(set(df.varkappa.values)))#1/s
x0_values=np.array(sorted(set(df.x0.values)))#1/s
set_second_values=np.array(sorted(set(df.set_second.values)))
reflect_values=np.array(sorted(set(df.reflect.values)))
no_repulsion_values=np.array(sorted(set(df.no_repulsion.values)))
no_attraction_values=np.array(sorted(set(df.no_attraction.values)))
neighbor_values=np.array(sorted(set(df.neighbor.values)))
force_code_values=np.array(sorted(set(df.force_code.values)))
if printing:
print(f"input parameters:")
print(f"r~{r_values}")
print(f"D~{D_values}")
print(f"L~{L_values}")
print(f"kappa~{kappa_values}")
print(f"a~{varkappa_values}")
print(f"x0~{x0_values}")
print(f"set_second~{set_second_values}")
print(f"reflect~{reflect_values}")
print(f"no_repulsion~{no_repulsion_values}")
print(f"no_attraction~{no_attraction_values}")
print(f"neighbor~{neighbor_values}")
print(f"force_code~{force_code_values}")
#TODO: compute the powerlaw fit for the x and y values and set them equal to m,M,Delta_m,Delta_M
#TODO: modify title to take m,M,Delta_m,Delta_M
#compute title= string
title=r"$\nu$="+f"{m:.3f}"+r"$\pm$"+f"{Delta_m:.3f}"
title=title+f", M={M:.3f}"+r"$\pm$"+f"{Delta_M:.3f} "+r"cm$^{2(\nu-1)}$/s"
title=title+f"\n"+r"RMSE$_{particle\;vs\;full}=$"+f"{rmse_particle_vs_full:.3f} Hz/cm"+r"^2"+f"\n"
    # additional parameters are optional and can be uncommented as needed...
# title=f"force_code={int(force_code_values[0])}, neighbors={int(neighbor_values[0])}, reflect={int(reflect_values[0])}\n"
# title=title+r'$r=$'+f'{r_values[0]:.2f} cm, '
# title=title+r'$\kappa=$'+f'{kappa_values[0]:.2f} Hz\n'
# title=title+r'$D=$'+f'{D_values[0]:.2f} cm'+r'$^2$/s, '
# title=title+r'$a=$'+f'{varkappa_values[0]:.2f} cm'+r'$^2$/s, '
# title=title+r'$x_0=$'+f'{x0_values[0]:.0f} cm\n'
# plot_horizontal solid & dashed
plot_horizontal(ax,xlim,Delta_thresh=Delta_thresh,use_Delta_thresh=use_Delta_thresh)
FormatAxes(ax,xlim,ylim,xlabel,ylabel,title,fontsize=fontsize,use_loglog=False)#,**kwargs)
#plot the data
if not use_error_bars:
PlotTrial(ax, x_values,y_values,title,title_fontsize)
else:
PlotErrorBarScatter(ax, x_values,y_values,yerr_values,title,title_fontsize)
# ax.legend(fontsize=legend_fontsize,ncol=1,loc='upper left')
return True
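# Illustrative usage (assumed, not part of the original module):
#   fig, (ax1, ax2) = plt.subplots(1, 2)
#   q_vs_w_plotter_function_from_df(ax1, df)        # df needs columns: q, w, r, D, L, kappa, varkappa,
#   q_vs_Delta_w_plotter_function_from_df(ax2, df)  # x0, set_second, reflect, no_repulsion,
#   plt.show()                                      # no_attraction, neighbor and force_code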
| [((108, 34, 108, 60), 'numpy.mean', 'np.mean', ({(108, 42, 108, 59): 'Delta_y_values ** 2'}, {}), '(Delta_y_values ** 2)', True, 'import matplotlib.pyplot as plt, numpy as np, pandas as pd, os\n')] |
achyudh/castor | decatt/model.py | d7a02ce03f2b71ef1fa490122dd4bbc8214b8b19 | import sys
import math
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class DecAtt(nn.Module):
def __init__(self, num_units, num_classes, embedding_size, dropout, device=0,
training=True, project_input=True,
use_intra_attention=False, distance_biases=10, max_sentence_length=30):
"""
Create the model based on MLP networks.
:param num_units: size of the networks
:param num_classes: number of classes in the problem
:param embedding_size: size of each word embedding
:param use_intra_attention: whether to use intra-attention model
:param training: whether to create training tensors (optimizer)
        :param project_input: whether to project input embeddings to a
different dimensionality
:param distance_biases: number of different distances with biases used
in the intra-attention model
"""
super().__init__()
self.arch = "DecAtt"
self.num_units = num_units
self.num_classes = num_classes
self.project_input = project_input
self.embedding_size = embedding_size
self.distance_biases = distance_biases
self.intra_attention = False
self.max_sentence_length = max_sentence_length
self.device = device
self.bias_embedding = nn.Embedding(max_sentence_length,1)
self.linear_layer_project = nn.Linear(embedding_size, num_units, bias=False)
#self.linear_layer_intra = nn.Sequential(nn.Linear(num_units, num_units), nn.ReLU(), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_attend = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_compare = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
                                                     nn.Linear(num_units, num_classes), nn.LogSoftmax(dim=1))
self.init_weight()
def init_weight(self):
self.linear_layer_project.weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].bias.data.fill_(0)
self.linear_layer_attend[4].weight.data.normal_(0, 0.01)
self.linear_layer_attend[4].bias.data.fill_(0)
self.linear_layer_compare[1].weight.data.normal_(0, 0.01)
self.linear_layer_compare[1].bias.data.fill_(0)
self.linear_layer_compare[4].weight.data.normal_(0, 0.01)
self.linear_layer_compare[4].bias.data.fill_(0)
self.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[1].bias.data.fill_(0)
self.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[4].bias.data.fill_(0)
#self.word_embedding.weight.data.copy_(torch.from_numpy(self.pretrained_emb))
def attention_softmax3d(self, raw_attentions):
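        # Flatten (batch, len1, len2) to (batch * len1, len2), softmax over the last axis,
        # then restore the original 3-d shape.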
reshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))
out = nn.functional.softmax(reshaped_attentions, dim=1)
return out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))
def _transformation_input(self, embed_sent):
embed_sent = self.linear_layer_project(embed_sent)
result = embed_sent
if self.intra_attention:
f_intra = self.linear_layer_intra(embed_sent)
f_intra_t = torch.transpose(f_intra, 1, 2)
raw_attentions = torch.matmul(f_intra, f_intra_t)
time_steps = embed_sent.size(1)
r = torch.arange(0, time_steps)
r_matrix = r.view(1,-1).expand(time_steps,time_steps)
raw_index = r_matrix-r.view(-1,1)
clipped_index = torch.clamp(raw_index,0,self.distance_biases-1)
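            # relative token distances are clamped into [0, distance_biases - 1] so that
            # larger separations share the last learned bias embedding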
clipped_index = Variable(clipped_index.long())
if torch.cuda.is_available():
clipped_index = clipped_index.to(self.device)
bias = self.bias_embedding(clipped_index)
bias = torch.squeeze(bias)
raw_attentions += bias
attentions = self.attention_softmax3d(raw_attentions)
attended = torch.matmul(attentions, embed_sent)
result = torch.cat([embed_sent,attended],2)
return result
def attend(self, sent1, sent2, lsize_list, rsize_list):
"""
Compute inter-sentence attention. This is step 1 (attend) in the paper
:param sent1: tensor in shape (batch, time_steps, num_units),
the projected sentence 1
:param sent2: tensor in shape (batch, time_steps, num_units)
:return: a tuple of 3-d tensors, alfa and beta.
"""
repr1 = self.linear_layer_attend(sent1)
repr2 = self.linear_layer_attend(sent2)
repr2 = torch.transpose(repr2,1,2)
raw_attentions = torch.matmul(repr1, repr2)
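        # raw_attentions: (batch, len1, len2) unnormalised alignment scores between every token pair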
#self.mask = generate_mask(lsize_list, rsize_list)
# masked = mask(self.raw_attentions, rsize_list)
#masked = raw_attentions * self.mask
att_sent1 = self.attention_softmax3d(raw_attentions)
beta = torch.matmul(att_sent1, sent2) #input2_soft
raw_attentions_t = torch.transpose(raw_attentions,1,2).contiguous()
#self.mask_t = torch.transpose(self.mask, 1, 2).contiguous()
# masked = mask(raw_attentions_t, lsize_list)
#masked = raw_attentions_t * self.mask_t
att_sent2 = self.attention_softmax3d(raw_attentions_t)
alpha = torch.matmul(att_sent2,sent1) #input1_soft
return alpha, beta
def compare(self, sentence, soft_alignment):
"""
        Apply a feed-forward network to compare one sentence to its
soft alignment with the other.
:param sentence: embedded and projected sentence,
shape (batch, time_steps, num_units)
:param soft_alignment: tensor with shape (batch, time_steps, num_units)
:return: a tensor (batch, time_steps, num_units)
"""
sent_alignment = torch.cat([sentence, soft_alignment],2)
out = self.linear_layer_compare(sent_alignment)
#out, (state, _) = self.lstm_compare(out)
return out
def aggregate(self, v1, v2):
"""
Aggregate the representations induced from both sentences and their
representations
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: logits over classes, shape (batch, num_classes)
"""
v1_sum = torch.sum(v1,1)
v2_sum = torch.sum(v2,1)
out = self.linear_layer_aggregate(torch.cat([v1_sum,v2_sum],1))
return out
def forward(self, sent1, sent2, ext_feats=None, word_to_doc_count=None, raw_sent1=None, raw_sent2=None):
lsize_list = [len(s.split(" ")) for s in raw_sent1]
rsize_list = [len(s.split(" ")) for s in raw_sent2]
sent1 = sent1.permute(0, 2, 1)
sent2 = sent2.permute(0, 2, 1)
sent1 = self._transformation_input(sent1)
sent2 = self._transformation_input(sent2)
alpha, beta = self.attend(sent1, sent2, lsize_list, rsize_list)
v1 = self.compare(sent1, beta)
v2 = self.compare(sent2, alpha)
logits = self.aggregate(v1, v2)
return logits
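# Illustrative forward pass (shapes and hyper-parameters are assumptions, not part of the original file):
#   model = DecAtt(num_units=200, num_classes=2, embedding_size=300, dropout=0.2)
#   sent1 = torch.randn(4, 300, 30)   # (batch, embedding_size, time_steps); forward() permutes to (batch, time, embed)
#   sent2 = torch.randn(4, 300, 30)
#   logits = model(sent1, sent2, raw_sent1=['a b c'] * 4, raw_sent2=['d e f'] * 4)  # (batch, num_classes) log-probs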
| [((41, 30, 41, 65), 'torch.nn.Embedding', 'nn.Embedding', ({(41, 43, 41, 62): 'max_sentence_length', (41, 63, 41, 64): '1'}, {}), '(max_sentence_length, 1)', True, 'import torch.nn as nn\n'), ((42, 36, 42, 84), 'torch.nn.Linear', 'nn.Linear', (), '', True, 'import torch.nn as nn\n'), ((74, 14, 74, 63), 'torch.nn.functional.softmax', 'nn.functional.softmax', (), '', True, 'import torch.nn as nn\n'), ((111, 16, 111, 42), 'torch.transpose', 'torch.transpose', ({(111, 32, 111, 37): 'repr2', (111, 38, 111, 39): '1', (111, 40, 111, 41): '2'}, {}), '(repr2, 1, 2)', False, 'import torch\n'), ((112, 25, 112, 51), 'torch.matmul', 'torch.matmul', ({(112, 38, 112, 43): 'repr1', (112, 45, 112, 50): 'repr2'}, {}), '(repr1, repr2)', False, 'import torch\n'), ((118, 15, 118, 45), 'torch.matmul', 'torch.matmul', ({(118, 28, 118, 37): 'att_sent1', (118, 39, 118, 44): 'sent2'}, {}), '(att_sent1, sent2)', False, 'import torch\n'), ((125, 16, 125, 45), 'torch.matmul', 'torch.matmul', ({(125, 29, 125, 38): 'att_sent2', (125, 39, 125, 44): 'sent1'}, {}), '(att_sent2, sent1)', False, 'import torch\n'), ((139, 25, 139, 64), 'torch.cat', 'torch.cat', ({(139, 35, 139, 61): '[sentence, soft_alignment]', (139, 62, 139, 63): '2'}, {}), '([sentence, soft_alignment], 2)', False, 'import torch\n'), ((153, 17, 153, 32), 'torch.sum', 'torch.sum', ({(153, 27, 153, 29): 'v1', (153, 30, 153, 31): '1'}, {}), '(v1, 1)', False, 'import torch\n'), ((154, 17, 154, 32), 'torch.sum', 'torch.sum', ({(154, 27, 154, 29): 'v2', (154, 30, 154, 31): '1'}, {}), '(v2, 1)', False, 'import torch\n'), ((45, 49, 45, 70), 'torch.nn.Dropout', 'nn.Dropout', (), '', True, 'import torch.nn as nn\n'), ((45, 72, 45, 103), 'torch.nn.Linear', 'nn.Linear', ({(45, 82, 45, 91): 'num_units', (45, 93, 45, 102): 'num_units'}, {}), '(num_units, num_units)', True, 'import torch.nn as nn\n'), ((45, 105, 45, 114), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((46, 49, 46, 70), 'torch.nn.Dropout', 'nn.Dropout', (), '', True, 'import torch.nn as nn\n'), ((46, 72, 46, 103), 'torch.nn.Linear', 'nn.Linear', ({(46, 82, 46, 91): 'num_units', (46, 93, 46, 102): 'num_units'}, {}), '(num_units, num_units)', True, 'import torch.nn as nn\n'), ((46, 105, 46, 114), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((48, 50, 48, 71), 'torch.nn.Dropout', 'nn.Dropout', (), '', True, 'import torch.nn as nn\n'), ((48, 73, 48, 106), 'torch.nn.Linear', 'nn.Linear', ({(48, 83, 48, 94): 'num_units * 2', (48, 96, 48, 105): 'num_units'}, {}), '(num_units * 2, num_units)', True, 'import torch.nn as nn\n'), ((48, 108, 48, 117), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((49, 50, 49, 71), 'torch.nn.Dropout', 'nn.Dropout', (), '', True, 'import torch.nn as nn\n'), ((49, 73, 49, 104), 'torch.nn.Linear', 'nn.Linear', ({(49, 83, 49, 92): 'num_units', (49, 94, 49, 103): 'num_units'}, {}), '(num_units, num_units)', True, 'import torch.nn as nn\n'), ((49, 106, 49, 115), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((51, 52, 51, 73), 'torch.nn.Dropout', 'nn.Dropout', (), '', True, 'import torch.nn as nn\n'), ((51, 75, 51, 108), 'torch.nn.Linear', 'nn.Linear', ({(51, 85, 51, 96): 'num_units * 2', (51, 98, 51, 107): 'num_units'}, {}), '(num_units * 2, num_units)', True, 'import torch.nn as nn\n'), ((51, 110, 51, 119), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((52, 52, 52, 73), 'torch.nn.Dropout', 'nn.Dropout', (), '', True, 'import 
torch.nn as nn\n'), ((52, 75, 52, 106), 'torch.nn.Linear', 'nn.Linear', ({(52, 85, 52, 94): 'num_units', (52, 96, 52, 105): 'num_units'}, {}), '(num_units, num_units)', True, 'import torch.nn as nn\n'), ((52, 108, 52, 117), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((53, 52, 53, 85), 'torch.nn.Linear', 'nn.Linear', ({(53, 62, 53, 71): 'num_units', (53, 73, 53, 84): 'num_classes'}, {}), '(num_units, num_classes)', True, 'import torch.nn as nn\n'), ((53, 87, 53, 102), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((82, 24, 82, 54), 'torch.transpose', 'torch.transpose', ({(82, 40, 82, 47): 'f_intra', (82, 49, 82, 50): '1', (82, 52, 82, 53): '2'}, {}), '(f_intra, 1, 2)', False, 'import torch\n'), ((83, 29, 83, 61), 'torch.matmul', 'torch.matmul', ({(83, 42, 83, 49): 'f_intra', (83, 51, 83, 60): 'f_intra_t'}, {}), '(f_intra, f_intra_t)', False, 'import torch\n'), ((85, 16, 85, 43), 'torch.arange', 'torch.arange', ({(85, 29, 85, 30): '0', (85, 32, 85, 42): 'time_steps'}, {}), '(0, time_steps)', False, 'import torch\n'), ((88, 28, 88, 75), 'torch.clamp', 'torch.clamp', ({(88, 40, 88, 49): 'raw_index', (88, 50, 88, 51): '0', (88, 52, 88, 74): 'self.distance_biases - 1'}, {}), '(raw_index, 0, self.distance_biases - 1)', False, 'import torch\n'), ((90, 15, 90, 40), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((93, 19, 93, 38), 'torch.squeeze', 'torch.squeeze', ({(93, 33, 93, 37): 'bias'}, {}), '(bias)', False, 'import torch\n'), ((96, 23, 96, 59), 'torch.matmul', 'torch.matmul', ({(96, 36, 96, 46): 'attentions', (96, 48, 96, 58): 'embed_sent'}, {}), '(attentions, embed_sent)', False, 'import torch\n'), ((97, 21, 97, 55), 'torch.cat', 'torch.cat', ({(97, 31, 97, 52): '[embed_sent, attended]', (97, 53, 97, 54): '2'}, {}), '([embed_sent, attended], 2)', False, 'import torch\n'), ((155, 42, 155, 70), 'torch.cat', 'torch.cat', ({(155, 52, 155, 67): '[v1_sum, v2_sum]', (155, 68, 155, 69): '1'}, {}), '([v1_sum, v2_sum], 1)', False, 'import torch\n'), ((120, 27, 120, 62), 'torch.transpose', 'torch.transpose', ({(120, 43, 120, 57): 'raw_attentions', (120, 58, 120, 59): '1', (120, 60, 120, 61): '2'}, {}), '(raw_attentions, 1, 2)', False, 'import torch\n')] |